Index: /issm/trunk/configure.ac
===================================================================
--- /issm/trunk/configure.ac	(revision 24685)
+++ /issm/trunk/configure.ac	(revision 24686)
@@ -2,5 +2,5 @@
 
 #AUTOCONF
-AC_INIT([Ice Sheet System Model (ISSM)],[4.16],[issm@jpl.nasa.gov],[issm],[http://issm.jpl.nasa.gov]) #Initializing configure
+AC_INIT([Ice Sheet System Model (ISSM)],[4.17],[issm@jpl.nasa.gov],[issm],[http://issm.jpl.nasa.gov]) #Initializing configure
 AC_CONFIG_AUX_DIR([./aux-config])         #Put config files in aux-config
 AC_CONFIG_MACRO_DIR([m4])                 #m4 macros are located in m4
Index: /issm/trunk/etc/environment.sh
===================================================================
--- /issm/trunk/etc/environment.sh	(revision 24685)
+++ /issm/trunk/etc/environment.sh	(revision 24686)
@@ -1,91 +1,167 @@
-# Modifies path-related envrionment variables based on which external packages 
+# Modifies path-related environment variables based on which external packages
 # have been installed.
 #
-# ISSM_DIR and ISSM_ARCH should have been defined already in your shell 
+# ISSM_DIR and ISSM_ARCH should have been defined already in your shell
 # settings file (i.e. .bashrc, .cshrc).
+#
+# TODO:
+# - Condition all path modifications on existence of external package 'install'
+#	directory
+#
 
 ## Functions
 #
-pathprepend(){ #{{{
-	if [ -d "$1" ] && [[ ":$PATH:" != *":$1:"* ]]; then
-		name=$1
-		if [[ "$ISSM_ARCH" == "cygwin-intel" ]]; then
-			#export path using the cygwin convention
-			name=`cygpath -u $1`
-		fi
-		export PATH="$name:$PATH"
-	fi
-} #}}}
-pathappend(){ #{{{
-	if [ -d "$1" ] && [[ ":$PATH:" != *":$1:"* ]]; then
-		name=$1
-		if [[ "$ISSM_ARCH" == "cygwin-intel" ]]; then
-			#export path in cygwin convention
-			name=`cygpath -u $1`
-		fi
-		export PATH="$PATH:$name"
-	fi
-} #}}}
-libpathprepend(){ #{{{
-	if [ -d "$1" ]; then
-		if [ -z $LD_LIBRARY_PATH ]; then
-			export LD_LIBRARY_PATH="$1"
-		elif [[ ":$LD_LIBRARY_PATH:" != *":$1:"* ]]; then
-			export LD_LIBRARY_PATH="$1:$LD_LIBRARY_PATH"
-		fi
-		if [ -z $LD_RUN_PATH ]; then
-			export LD_RUN_PATH="$1"
-		elif [[ ":$LD_RUN_PATH:" != *":$1:"* ]]; then
-			export LD_RUN_PATH="$1:$LD_RUN_PATH"
-		fi
-	fi
-} #}}}
-libpathappend(){ #{{{
-	if [ -d "$1" ]; then
-		if [ -z $LD_LIBRARY_PATH ]; then
-			export LD_LIBRARY_PATH=$1
-		elif [[ ":$LD_LIBRARY_PATH:" != *":$1:"* ]]; then
-			export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$1"
+c_include_path_append(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $C_INCLUDE_PATH ]; then
+			export C_INCLUDE_PATH="${1}"
+		elif [[ ":${C_INCLUDE_PATH}:" != *":${1}:"* ]]; then
+			export C_INCLUDE_PATH="${C_INCLUDE_PATH}:${1}"
+		fi
+	fi
+} #}}}
+c_include_path_prepend(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $C_INCLUDE_PATH ]; then
+			export C_INCLUDE_PATH="${1}"
+		elif [[ ":${C_INCLUDE_PATH}:" != *":${1}:"* ]]; then
+			export C_INCLUDE_PATH="${1}:${C_INCLUDE_PATH}"
+		fi
+	fi
+} #}}}
+
+cpath_append(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $CPATH ]; then
+			export CPATH="${1}"
+		elif [[ ":${CPATH}:" != *":${1}:"* ]]; then
+			export CPATH="${CPATH}:${1}"
+		fi
+	fi
+} #}}}
+cpath_prepend(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $CPATH ]; then
+			export CPATH="${1}"
+		elif [[ ":${CPATH}:" != *":${1}:"* ]]; then
+			export CPATH="${1}:${CPATH}"
+		fi
+	fi
+} #}}}
+
+cplus_include_path_append(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $CPLUS_INCLUDE_PATH ]; then
+			export CPLUS_INCLUDE_PATH="${1}"
+		elif [[ ":${CPLUS_INCLUDE_PATH}:" != *":${1}:"* ]]; then
+			export CPLUS_INCLUDE_PATH="${CPLUS_INCLUDE_PATH}:${1}"
+		fi
+	fi
+} #}}}
+cplus_include_path_prepend(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $CPLUS_INCLUDE_PATH ]; then
+			export CPLUS_INCLUDE_PATH="${1}"
+		elif [[ ":${CPLUS_INCLUDE_PATH}:" != *":${1}:"* ]]; then
+			export CPLUS_INCLUDE_PATH="${1}:${CPLUS_INCLUDE_PATH}"
+		fi
+	fi
+} #}}}
+
+dyld_library_path_append(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $DYLD_LIBRARY_PATH ]; then
+			export DYLD_LIBRARY_PATH="${1}"
+		elif [[ ":${DYLD_LIBRARY_PATH}:" != *":${1}:"* ]]; then
+			export DYLD_LIBRARY_PATH="${DYLD_LIBRARY_PATH}:${1}"
 		fi
 		if [ -z $LD_RUN_PATH ]; then
 			export LD_RUN_PATH=$1
-		elif [[ ":$LD_RUN_PATH:" != *":$1:"* ]]; then
-			export LD_RUN_PATH="$LD_RUN_PATH:$1"
-		fi
-	fi
-} #}}}
-dylibpathprepend(){ #{{{
-	if [ -d "$1" ]; then
+		elif [[ ":${LD_RUN_PATH}:" != *":${1}:"* ]]; then
+			export LD_RUN_PATH="${LD_RUN_PATH}:${1}"
+		fi
+	fi
+} #}}}
+dyld_library_path_prepend(){ #{{{
+	if [ -d "${1}" ]; then
 		if [ -z $DYLD_LIBRARY_PATH ]; then
-			export DYLD_LIBRARY_PATH=$1
-		elif [[ ":$DYLD_LIBRARY_PATH:" != *":$1:"* ]]; then
-			export DYLD_LIBRARY_PATH="$1:$DYLD_LIBRARY_PATH"
+			export DYLD_LIBRARY_PATH="${1}"
+		elif [[ ":${DYLD_LIBRARY_PATH}:" != *":${1}:"* ]]; then
+			export DYLD_LIBRARY_PATH="${1}:${DYLD_LIBRARY_PATH}"
 		fi
 		if [ -z $LD_RUN_PATH ]; then
-			export LD_RUN_PATH=$1
-		elif [[ ":$LD_RUN_PATH:" != *":$1:"* ]]; then
-			export LD_RUN_PATH="$1:$LD_RUN_PATH"
-		fi
-	fi
-} #}}}
-dylibpathappend(){ #{{{
-	if [ -d "$1" ]; then
-		if [ -z $DYLD_LIBRARY_PATH ]; then
-			export DYLD_LIBRARY_PATH=$1
-		elif [[ ":$DYLD_LIBRARY_PATH:" != *":$1:"* ]]; then
-			export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$1"
+			export LD_RUN_PATH="${1}"
+		elif [[ ":${LD_RUN_PATH}:" != *":${1}:"* ]]; then
+			export LD_RUN_PATH="${1}:${LD_RUN_PATH}"
+		fi
+	fi
+} #}}}
+
+ld_library_path_append(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $LD_LIBRARY_PATH ]; then
+			export LD_LIBRARY_PATH="${1}"
+		elif [[ ":${LD_LIBRARY_PATH}:" != *":${1}:"* ]]; then
+			export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${1}"
 		fi
 		if [ -z $LD_RUN_PATH ]; then
-			export LD_RUN_PATH=$1
-		elif [[ ":$LD_RUN_PATH:" != *":$1:"* ]]; then
-			export LD_RUN_PATH="$LD_RUN_PATH:$1"
-		fi
-	fi
-} #}}}
-
-# FIXME: during installation packages are installed one by one but 
-# environment.sh was sourced before so new packages are NOT in the path. May 
-# source environment.sh again with:
-# 	if [ -z $(echo "$PATH" | grep "$MATLAB_DIR") ]; then export $PATH...; fi
+			export LD_RUN_PATH="${1}"
+		elif [[ ":${LD_RUN_PATH}:" != *":$1:"* ]]; then
+			export LD_RUN_PATH="${LD_RUN_PATH}:${1}"
+		fi
+	fi
+} #}}}
+ld_library_path_prepend(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $LD_LIBRARY_PATH ]; then
+			export LD_LIBRARY_PATH="${1}"
+		elif [[ ":${LD_LIBRARY_PATH}:" != *":${1}:"* ]]; then
+			export LD_LIBRARY_PATH="${1}:${LD_LIBRARY_PATH}"
+		fi
+		if [ -z $LD_RUN_PATH ]; then
+			export LD_RUN_PATH="${1}"
+		elif [[ ":${LD_RUN_PATH}:" != *":${1}:"* ]]; then
+			export LD_RUN_PATH="${1}:${LD_RUN_PATH}"
+		fi
+	fi
+} #}}}
+
+library_path_append(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $LIBRARY_PATH ]; then
+			export LIBRARY_PATH="${1}"
+		elif [[ ":${LIBRARY_PATH}:" != *":$1:"* ]]; then
+			export LIBRARY_PATH="${LIBRARY_PATH}:${1}"
+		fi
+	fi
+} #}}}
+library_path_prepend(){ #{{{
+	if [ -d "${1}" ]; then
+		if [ -z $LIBRARY_PATH ]; then
+			export LIBRARY_PATH="${1}"
+		elif [[ ":${LIBRARY_PATH}:" != *":$1:"* ]]; then
+			export LIBRARY_PATH="${1}:${LIBRARY_PATH}"
+		fi
+	fi
+} #}}}
+
+path_append(){ #{{{
+	if [ -d "${1}" ] && [[ ":${PATH}:" != *":${1}:"* ]]; then
+		path="${1}"
+		if [[ "${ISSM_ARCH}" == "cygwin-intel" ]]; then
+			path=`cygpath -u "${1}"`
+		fi
+		export PATH="${PATH}:${path}"
+	fi
+} #}}}
+path_prepend(){ #{{{
+	if [ -d "${1}" ] && [[ ":${PATH}:" != *":${1}:"* ]]; then
+		path="${1}"
+		if [[ "${ISSM_ARCH}" == "cygwin-intel" ]]; then
+			path=`cygpath -u "${1}"`
+		fi
+		export PATH="${path}:${PATH}"
+	fi
+} #}}}
 
 # Windows compilers:
@@ -95,256 +171,295 @@
 
 # Load ISSM scripts
-pathappend "$ISSM_DIR/scripts"
-
-GMT_DIR="$ISSM_DIR/externalpackages/gmt/install"
-if [ -d "$GMT_DIR" ]; then
-	export GMT_DIR
-	pathprepend   "$GMT_DIR/bin" 
-	libpathappend "$GMT_DIR/lib"
-fi
-
-MPI_DIR="$ISSM_DIR/externalpackages/mpich/install"
-if [ -d "$MPI_DIR" ]; then
+path_append "${ISSM_DIR}/scripts"
+
+SVN_DIR="${ISSM_DIR}/externalpackages/svn/install"
+if [ -d "${SVN_DIR}" ]; then
+	path_prepend   "${SVN_DIR}/bin"
+	ld_library_path_append "${SVN_DIR}/lib"
+fi
+
+GIT_DIR="${ISSM_DIR}/externalpackages/git/install"
+if [ -d "${GIT_DIR}" ]; then
+	path_prepend "${GIT_DIR}/bin"
+fi
+
+MPI_DIR="${ISSM_DIR}/externalpackages/mpich/install"
+if [ -d "${MPI_DIR}" ]; then
 	export MPI_DIR
+	export MPI_HOME=${MPI_DIR} # Needed by Dakota
 	export MPI_INC_DIR="$MPI_DIR/include"
-	pathprepend    "$MPI_DIR/bin"
-	libpathprepend "$MPI_DIR/lib"
-fi
-
-PETSC_DIR="$ISSM_DIR/externalpackages/petsc/install"
+	path_prepend "${MPI_DIR}/bin"
+	cpath_prepend "${MPI_DIR}/include"
+	ld_library_path_append "${MPI_DIR}/lib"
+fi
+
+PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/install"
 if [ -d "$PETSC_DIR" ]; then
 	export PETSC_DIR
-	libpathappend "$PETSC_DIR/lib"
-fi
-
-SCOTCH_DIR="$ISSM_DIR/externalpackages/scotch/install"
-libpathappend "$SCOTCH_DIR/lib"
-
-SLEPC_DIR="$ISSM_DIR/externalpackages/slepc/install"
-libpathappend "$SLEPC_DIR/lib"
-
-TAO_DIR="$ISSM_DIR/externalpackages/tao/install"
-libpathappend "$TAO_DIR/lib"
-
-DAKOTA_DIR="$ISSM_DIR/externalpackages/dakota/install"
-pathappend "$DAKOTA_DIR/bin"
-libpathappend "$DAKOTA_DIR/lib"
-dylibpathprepend "$DAKOTA_DIR/lib"
-
-DOXYGEN_DIR="$ISSM_DIR/externalpackages/doxygen/install"
-pathprepend "$DOXYGEN_DIR/bin"
-
-AUTOTOOLS_DIR="$ISSM_DIR/externalpackages/autotools/install"
-pathprepend "$AUTOTOOLS_DIR/bin"
+	ld_library_path_append "${PETSC_DIR}/lib"
+
+	# In case we have installed MPICH via PETSc
+	if [ -f "${PETSC_DIR}/bin/mpiexec" ]; then
+		MPI_DIR=${PETSC_DIR}
+		export MPI_DIR
+		export MPI_HOME=${MPI_DIR} # Needed by Dakota
+		export MPI_INC_DIR="${MPI_DIR}/include"
+		path_prepend "${MPI_DIR}/bin"
+		cpath_prepend "${MPI_DIR}/include"
+	fi
+fi
+
+LAPACK_DIR="${ISSM_DIR}/externalpackages/lapack/install"
+ld_library_path_append "${LAPACK_DIR}/lib"
+
+SCOTCH_DIR="${ISSM_DIR}/externalpackages/scotch/install"
+ld_library_path_append "$SCOTCH_DIR/lib"
+
+SLEPC_DIR="${ISSM_DIR}/externalpackages/slepc/install"
+ld_library_path_append "$SLEPC_DIR/lib"
+
+TAO_DIR="${ISSM_DIR}/externalpackages/tao/install"
+ld_library_path_append "$TAO_DIR/lib"
+
+DAKOTA_DIR="${ISSM_DIR}/externalpackages/dakota/install"
+if [ -d "${DAKOTA_DIR}" ]; then
+	path_append "${DAKOTA_DIR}/bin"
+	ld_library_path_append "${DAKOTA_DIR}/lib"
+	dyld_library_path_prepend "${DAKOTA_DIR}/lib"
+fi
+
+DOXYGEN_DIR="${ISSM_DIR}/externalpackages/doxygen/install"
+path_prepend "$DOXYGEN_DIR/bin"
+
+AUTOTOOLS_DIR="${ISSM_DIR}/externalpackages/autotools/install"
+path_prepend "$AUTOTOOLS_DIR/bin"
 
 SDK_DIR="C:/MicrosoftVisualStudio 9.0/Microsoft Visual C++ 2008 Express Edition with SP1 - ENU"
-pathappend "$SDK_DIR"
-
-SSH_DIR="$ISSM_DIR/externalpackages/ssh"
-pathappend "$SSH_DIR"
-
-VALGRIND_DIR="$ISSM_DIR/externalpackages/valgrind/install"
-pathprepend "$VALGRIND_DIR/bin"
-
-NCO_DIR="$ISSM_DIR/externalpackages/nco/install/bin"
-pathprepend "$NCO_DIR/bin"
-
-CPPCHECK_DIR="$ISSM_DIR/externalpackages/cppcheck/install"
-pathappend "$CPPCHECK_DIR/bin"
-
-GDAL_DIR="$ISSM_DIR/externalpackages/gdal/install"
-pathprepend "$GDAL_DIR/bin"
-libpathappend "$GDAL_DIR/lib"
-
-PROJ4_DIR="$ISSM_DIR/externalpackages/proj.4/install"
-dylibpathprepend "$PROJ4_DIR/lib"
-libpathprepend "$PROJ4_DIR/lib"
-
-MERCURIAL_DIR="$ISSM_DIR/externalpackages/mercurial/install"
+path_append "$SDK_DIR"
+
+SSH_DIR="${ISSM_DIR}/externalpackages/ssh"
+path_append "$SSH_DIR"
+
+VALGRIND_DIR="${ISSM_DIR}/externalpackages/valgrind/install"
+path_prepend "$VALGRIND_DIR/bin"
+
+NCO_DIR="${ISSM_DIR}/externalpackages/nco/install/bin"
+path_prepend "$NCO_DIR/bin"
+
+CPPCHECK_DIR="${ISSM_DIR}/externalpackages/cppcheck/install"
+path_append "$CPPCHECK_DIR/bin"
+
+MERCURIAL_DIR="${ISSM_DIR}/externalpackages/mercurial/install"
 if [ -d "$MERCURIAL_DIR" ]; then
 	export PYTHONPATH="$PYTHONPATH:$MERCURIAL_DIR/mercurial/pure/"
-	pathappend "$MERCURIAL_DIR"
-fi
-
-BOOST_DIR="$ISSM_DIR/externalpackages/boost/install"
-BOOSTROOT="$ISSM_DIR/externalpackages/boost/install"
-if [ -d "$BOOST_DIR" ]; then
+	path_append "$MERCURIAL_DIR"
+fi
+
+BOOST_DIR="${ISSM_DIR}/externalpackages/boost/install"
+BOOSTROOT="${ISSM_DIR}/externalpackages/boost/install"
+if [ -d "${BOOST_DIR}" ]; then
 	export BOOSTROOT
 	export BOOST_DIR
-	libpathprepend   "$BOOST_DIR/lib"
-	dylibpathprepend "$BOOST_DIR/lib"
-	pathprepend      "$BOOST_DIR/bin"
-fi
-
-XERCESROOT="$ISSM_DIR/externalpackages/xerces/install"
-XERCESCROOT="$ISSM_DIR/externalpackages/xerces/src"
+	library_path_prepend "${BOOST_DIR}/lib"
+	ld_library_path_prepend "${BOOST_DIR}/lib"
+	dyld_library_path_prepend "${BOOST_DIR}/lib"
+	path_prepend "${BOOST_DIR}/bin"
+fi
+
+XERCESROOT="${ISSM_DIR}/externalpackages/xerces/install"
 if [ -d "$XERCESROOT" ]; then
-	export XERCESROOT 
-	export XERCESCROOT
-fi
-
-XAIF_DIR="$ISSM_DIR/externalpackages/xaifbooster/xaifBooster"
-XAIFBOOSTERROOT="$ISSM_DIR/externalpackages/xaifbooster/"
-XAIFBOOSTER_HOME="$ISSM_DIR/externalpackages/xaifbooster/xaifBooster"
-PLATFORM="x86-Linux"
+	export XERCESROOT
+	export XERCESCROOT="${ISSM_DIR}/externalpackages/xerces/src"
+fi
+
+
+XAIFBOOSTERROOT="${ISSM_DIR}/externalpackages/xaifbooster"
+XAIF_DIR="${XAIFBOOSTERROOT}/xaifBooster"
 if [ -d "$XAIF_DIR" ]; then
 	export XAIFBOOSTERROOT
-	export XAIFBOOSTER_HOME
 	export XAIF_DIR
-	export PLATFORM
-fi
-
-ANGELROOT="$ISSM_DIR/externalpackages/angel/angel"
+	export XAIFBOOSTER_HOME=$XAIF_DIR
+	export PLATFORM="x86-Linux"
+fi
+
+ANGELROOT="${ISSM_DIR}/externalpackages/angel/angel"
 if [ -d "$ANGELROOT" ]; then
 	export ANGELROOT
 fi
 
-OPENANALYSISROOT="$ISSM_DIR/externalpackages/openanalysis/install"
+OPENANALYSISROOT="${ISSM_DIR}/externalpackages/openanalysis/install"
 if [ -d "$OPENANALYSISROOT" ]; then
 	export OPENANALYSISROOT
-	libpathappend "$OPENANALYSISROOT/lib"
+	ld_library_path_append "$OPENANALYSISROOT/lib"
 fi
 
 JVM_DIR="/usr/local/gcc/4.3.2/lib64/gcj-4.3.2-9/"
-libpathappend "$JVM_DIR"
-
-BBFTP_DIR="$ISSM_DIR/externalpackages/bbftp/install"
-pathappend "$BBFTP_DIR/bin"
-
-ADIC_DIR="$ISSM_DIR/externalpackages/adic/install"
-pathappend "$ADIC_DIR/bin"
-libpathappend "$ADIC_DIR/lib"
-
-COLPACK_DIR="$ISSM_DIR/externalpackages/colpack/install"
-libpathappend "$COLPACK_DIR/lib"
-
-ECLIPSE_DIR="$ISSM_DIR/externalpackages/eclipse/install"
-pathappend "$ECLIPSE_DIR"
-
-APPSCAN_DIR="$ISSM_DIR/externalpackages/appscan/install"
-pathappend "$APPSCAN_DIR/bin"
-
-RATS_DIR="$ISSM_DIR/externalpackages/rats/install"
-pathappend "$RATS_DIR/bin"
-
-DYSON_DIR="$ISSM_DIR/externalpackages/dyson/"
-pathappend "$DYSON_DIR"
-
-CMAKE_DIR="$ISSM_DIR/externalpackages/cmake/install"
-pathprepend "$CMAKE_DIR/bin"
-
-SHAPELIB_DIR="$ISSM_DIR/externalpackages/shapelib/install"
-pathappend "$SHAPELIB_DIR/exec"
-
-CCCL_DIR="$ISSM_DIR/externalpackages/cccl/install"
-pathappend "$CCCL_DIR/bin"
-
-PACKAGEMAKER_DIR="$ISSM_DIR/externalpackages/packagemaker/install"
-pathappend "$PACKAGEMAKER_DIR"
+ld_library_path_append "$JVM_DIR"
+
+BBFTP_DIR="${ISSM_DIR}/externalpackages/bbftp/install"
+path_append "$BBFTP_DIR/bin"
+
+ADIC_DIR="${ISSM_DIR}/externalpackages/adic/install"
+path_append "$ADIC_DIR/bin"
+ld_library_path_append "$ADIC_DIR/lib"
+
+COLPACK_DIR="${ISSM_DIR}/externalpackages/colpack/install"
+ld_library_path_append "$COLPACK_DIR/lib"
+
+ECLIPSE_DIR="${ISSM_DIR}/externalpackages/eclipse/install"
+path_append "$ECLIPSE_DIR"
+
+APPSCAN_DIR="${ISSM_DIR}/externalpackages/appscan/install"
+path_append "$APPSCAN_DIR/bin"
+
+RATS_DIR="${ISSM_DIR}/externalpackages/rats/install"
+path_append "$RATS_DIR/bin"
+
+DYSON_DIR="${ISSM_DIR}/externalpackages/dyson/"
+path_append "$DYSON_DIR"
+
+CMAKE_DIR="${ISSM_DIR}/externalpackages/cmake/install"
+path_prepend "$CMAKE_DIR/bin"
+
+SHAPELIB_DIR="${ISSM_DIR}/externalpackages/shapelib/install"
+path_append "$SHAPELIB_DIR/exec"
+
+CCCL_DIR="${ISSM_DIR}/externalpackages/cccl/install"
+path_append "$CCCL_DIR/bin"
+
+PACKAGEMAKER_DIR="${ISSM_DIR}/externalpackages/packagemaker/install"
+path_append "$PACKAGEMAKER_DIR"
 
 #android-dev-dir
-export ANDROID_DIR="$ISSM_DIR/externalpackages/android"
+export ANDROID_DIR="${ISSM_DIR}/externalpackages/android"
 
 export ANDROID_NDK_DIR="$ANDROID_DIR/android-ndk/install"
-pathappend "$ANDROID_NDK_DIR/arm-linux-android-install/bin"
+path_append "$ANDROID_NDK_DIR/arm-linux-android-install/bin"
 
 export ANDROID_SDK_DIR="$ANDROID_DIR/android-sdk/install"
-pathappend "$ANDROID_SDK_DIR/"
-
-GSL_DIR="$ISSM_DIR/externalpackages/gsl/install"
-libpathappend "$GSL_DIR/lib"
-
-GMAKE_DIR="$ISSM_DIR/externalpackages/gmake/install"
-pathprepend "$GMAKE_DIR/bin"
-
-MODELE_DIR="$ISSM_DIR/externalpackages/modelE/install"
-pathappend "$MODELE_DIR/src/exec"
-
-GIT_DIR="$ISSM_DIR/externalpackages/git/install"
-pathprepend "$GIT_DIR/bin"
-
-NCVIEW_DIR="$ISSM_DIR/externalpackages/ncview/install"
-pathappend "$NCVIEW_DIR"
-
-TCLX_DIR="$ISSM_DIR/externalpackages/tclx/install/lib/tclx8.4"
-libpathappend "$TCLX_DIR"
-
-ASPELL_DIR="$ISSM_DIR/externalpackages/aspell/install"
-pathappend "$ASPELL_DIR/bin"
-
-HDF5_DIR="$ISSM_DIR/externalpackages/hdf5/install"
-dylibpathappend "$HDF5_DIR/lib"
-libpathappend "$HDF5_DIR/lib"
-if [ -d "$HDF5_DIR" ]; then
-	export LIBRARY_PATH="$LIBRARY_PATH:$HDF5_DIR/lib"
-	export C_INCLUDE_PATH="$C_INCLUDE_PATH:$HDF5_DIR/include"
-fi
-
-NETCDF_DIR="$ISSM_DIR/externalpackages/netcdf/install"
-pathappend "$NETCDF_DIR/bin"
-dylibpathappend "$NETCDF_DIR/lib"
-libpathappend "$NETCDF_DIR/lib"
-if [ -d "$NETCDF_DIR" ]; then
-	export LIBRARY_PATH="$LIBRARY_PATH:$NETCDF_DIR/lib"
-	dylibpathappend "$NETCDF_DIR/lib"
-	libpathappend "$NETCDF_DIR/lib"
-	export C_INCLUDE_PATH="$C_INCLUDE_PATH:$NETCDF_DIR/include"
-fi
-
-NETCDF_CXX_DIR="$ISSM_DIR/externalpackages/netcdf-cxx/install"
-libpathappend "$NETCDF_CXX_DIR/lib"
-
-SVN_DIR="$ISSM_DIR/externalpackages/svn/install"
-pathprepend   "$SVN_DIR/bin"
-libpathappend "$SVN_DIR/lib"
-
-CVS_DIR="$ISSM_DIR/externalpackages/cvs/install"
-pathprepend   "$CVS_DIR/bin"
-
-APR_DIR="$ISSM_DIR/externalpackages/apr/install"
-pathappend    "$APR_DIR/bin"
-libpathappend "$APR_DIR/lib"
-
-APR_UTIL_DIR="$ISSM_DIR/externalpackages/apr-util/install"
-pathappend   "$APR_UTIL_DIR/bin:$PATH"
-libpathappend "$APR_UTIL_DIR/lib"
-
-SQLITE_DIR="$ISSM_DIR/externalpackages/sqlite/install"
-pathappend   "$SQLITE_DIR/bin"
-libpathappend "$SQLITE_DIR/lib"
-
-YAMS_DIR="$ISSM_DIR/externalpackages/yams/install"
-pathappend   "$YAMS_DIR"
-
-SWIG_DIR="$ISSM_DIR/externalpackages/swig/install"
-pathappend   "$SWIG_DIR"
+path_append "$ANDROID_SDK_DIR/"
+
+GSL_DIR="${ISSM_DIR}/externalpackages/gsl/install"
+ld_library_path_append "$GSL_DIR/lib"
+
+GMAKE_DIR="${ISSM_DIR}/externalpackages/gmake/install"
+path_prepend "$GMAKE_DIR/bin"
+
+MODELE_DIR="${ISSM_DIR}/externalpackages/modelE/install"
+path_append "$MODELE_DIR/src/exec"
+
+NCVIEW_DIR="${ISSM_DIR}/externalpackages/ncview/install"
+path_append "$NCVIEW_DIR"
+
+TCLX_DIR="${ISSM_DIR}/externalpackages/tclx/install/lib/tclx8.4"
+ld_library_path_append "$TCLX_DIR"
+
+ASPELL_DIR="${ISSM_DIR}/externalpackages/aspell/install"
+path_append "$ASPELL_DIR/bin"
+
+NETCDF_DIR="${ISSM_DIR}/externalpackages/netcdf/install"
+if [ -d "${NETCDF_DIR}" ]; then
+	path_append "${NETCDF_DIR}/bin"
+	cpath_append "${NETCDF_DIR}/include"
+	library_path_append "${NETCDF_DIR}/lib"
+	dyld_library_path_append "${NETCDF_DIR}/lib"
+	ld_library_path_append "${NETCDF_DIR}/lib"
+fi
+
+NETCDF_CXX_DIR="${ISSM_DIR}/externalpackages/netcdf-cxx/install"
+if [ -d "${NETCDF_CXX_DIR}" ]; then
+	ld_library_path_append "${NETCDF_CXX_DIR}/lib"
+fi
+
+HDF5_DIR="${ISSM_DIR}/externalpackages/hdf5/install"
+if [ -d "${HDF5_DIR}" ]; then
+	cpath_append "${HDF5_DIR}/include"
+	library_path_append "${HDF5_DIR}/lib"
+	dyld_library_path_append "${HDF5_DIR}/lib"
+	ld_library_path_append "${HDF5_DIR}/lib"
+fi
+
+SQLITE_DIR="${ISSM_DIR}/externalpackages/sqlite/install"
+if [ -d "${SQLITE_DIR}" ]; then
+	path_append "${SQLITE_DIR}/bin"
+	ld_library_path_append "${SQLITE_DIR}/lib"
+fi
+
+PROJ4_DIR="${ISSM_DIR}/externalpackages/proj.4/install"
+if [ -d "${PROJ4_DIR}" ]; then
+	dyld_library_path_prepend "${PROJ4_DIR}/lib"
+	ld_library_path_prepend "${PROJ4_DIR}/lib"
+fi
+
+PROJ_DIR="${ISSM_DIR}/externalpackages/proj/install"
+if [ -d "${PROJ_DIR}" ]; then
+	dyld_library_path_prepend "${PROJ_DIR}/lib"
+	ld_library_path_prepend "${PROJ_DIR}/lib"
+fi
+
+GDAL_DIR="${ISSM_DIR}/externalpackages/gdal/install"
+if [ -d "${GDAL_DIR}" ]; then
+	path_prepend "${GDAL_DIR}/bin"
+	ld_library_path_append "${GDAL_DIR}/lib"
+fi
+
+GMT_DIR="${ISSM_DIR}/externalpackages/gmt/install"
+if [ -d "${GMT_DIR}" ]; then
+	export GMT_DIR
+	path_prepend "${GMT_DIR}/bin"
+fi
+
+GMSH_DIR="${ISSM_DIR}/externalpackages/gmsh/install"
+if [ -d "${GMSH_DIR}" ]; then
+	path_append "${ISSM_DIR}/externalpackages/gmsh/install"
+fi
+
+CVS_DIR="${ISSM_DIR}/externalpackages/cvs/install"
+path_prepend "$CVS_DIR/bin"
+
+APR_DIR="${ISSM_DIR}/externalpackages/apr/install"
+path_append "$APR_DIR/bin"
+ld_library_path_append "$APR_DIR/lib"
+
+APR_UTIL_DIR="${ISSM_DIR}/externalpackages/apr-util/install"
+path_prepend "$APR_UTIL_DIR/bin"
+ld_library_path_append "$APR_UTIL_DIR/lib"
+
+YAMS_DIR="${ISSM_DIR}/externalpackages/yams/install"
+path_append "$YAMS_DIR"
+
+SWIG_DIR="${ISSM_DIR}/externalpackages/swig/install"
+path_append "$SWIG_DIR"
 
 #AUX-CONFIG
-pathappend   "$ISSM_DIR/aux-config"
+path_append "${ISSM_DIR}/aux-config"
 
 #INISHELL
-pathappend   "$ISSM_DIR/externalpackages/inishell/install"
+path_append "${ISSM_DIR}/externalpackages/inishell/install"
 
 #SHELL2JUNIT
-pathappend   "$ISSM_DIR/externalpackages/shell2junit/install"
+path_append "${ISSM_DIR}/externalpackages/shell2junit/install"
 
 #EXPAT
-libpathprepend   "$ISSM_DIR/externalpackages/expat/install"
-dylibpathprepend   "$ISSM_DIR/externalpackages/expat/install"
-
-#GMSH
-pathappend   "$ISSM_DIR/externalpackages/gmsh/install"
+ld_library_path_prepend "${ISSM_DIR}/externalpackages/expat/install"
+dyld_library_path_prepend "${ISSM_DIR}/externalpackages/expat/install"
 
 #CURL
-libpathprepend   "$ISSM_DIR/externalpackages/curl/install/lib"
-dylibpathprepend   "$ISSM_DIR/externalpackages/curl/install/lib"
-pathprepend "$ISSM_DIR/externalpackages/curl/install/bin"
-
-#GMT
-pathprepend "$ISSM_DIR/externalpackages/gmt/install/bin"
+CURL_DIR="${ISSM_DIR}/externalpackages/curl/install"
+if [ -d "${CURL_DIR}" ]; then
+	ld_library_path_prepend "${CURL_DIR}/lib"
+	dyld_library_path_prepend "${CURL_DIR}/lib"
+	path_prepend "${CURL_DIR}/bin"
+fi
 
 #NEOPZ
-NEOPZ_DIR="$ISSM_DIR/externalpackages/neopz/install"
+NEOPZ_DIR="${ISSM_DIR}/externalpackages/neopz/install"
 if [ -d "$NEOPZ_DIR" ]; then
 	export REFPATTERNDIR="$NEOPZ_DIR/include/refpatterns"
 fi
+
+TRIANGLE_DIR="${ISSM_DIR}/externalpackages/triangle/install"
+if [ -d "${TRIANGLE_DIR}" ]; then
+	ld_library_path_append "${TRIANGLE_DIR}/lib"
+	dyld_library_path_append "${TRIANGLE_DIR}/lib"
+fi
Index: /issm/trunk/externalpackages/adjoinablempi/install.sh
===================================================================
--- /issm/trunk/externalpackages/adjoinablempi/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/adjoinablempi/install.sh	(revision 24686)
@@ -2,31 +2,37 @@
 set -eu
 
-#Some cleanup
-rm -rf src  install
 
 # Keeping this for potential future use
-#Mercurial cloning: 
+#Mercurial cloning:
 #hg clone -r 268 http://mercurial.mcs.anl.gov//ad/AdjoinableMPI src
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/adjoinablempi' 'adjoinablempi.tar.gz'
+# Cleanup
+rm -rf install src
+mkdir install src
 
-#Untar ADOL-C
-tar -zxf  adjoinablempi.tar.gz
+# Download source
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/adjoinablempi.tar.gz" "adjoinablempi.tar.gz"
 
-#Configure adjoinablempi
+# Unpack source
+tar -zxvf adjoinablempi.tar.gz
+
+# Configure
 cd src
 ./configure \
-	--prefix="$ISSM_DIR/externalpackages/adjoinablempi/install" \
-	--libdir="$ISSM_DIR/externalpackages/adjoinablempi/install/lib" \
-	--with-mpi-root="$ISSM_DIR/externalpackages/mpich/install" \
+	--prefix="${ISSM_DIR}/externalpackages/adjoinablempi/install" \
+	--libdir="${ISSM_DIR}/externalpackages/adjoinablempi/install/lib" \
+	--with-mpi-root="${ISSM_DIR}/externalpackages/mpich/install" \
 	--enable-requestOnTrace
 
-#Compile adjoinablempi 
+# Clean
 make clean
+
+# Compile
 if [ $# -eq 0 ]; then
-	make 
+	make
 else
 	make -j $1
 fi
+
+# Install
 make install
Index: /issm/trunk/externalpackages/adolc/install-withampi.sh
===================================================================
--- /issm/trunk/externalpackages/adolc/install-withampi.sh	(revision 24685)
+++ /issm/trunk/externalpackages/adolc/install-withampi.sh	(revision 24686)
@@ -1,30 +1,39 @@
 #!/bin/bash
 set -eu
- 
-#Some cleanup
-rm -rf install src
+
 
 # Keeping the following commented line for potential future use.
 #git clone https://gitlab.com/adol-c/adol-c.git src
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/ADOL-C' 'ADOL-C.tar.gz'
+## Environment
+#
+export CFLAGS="-O2 -L${ISSM_DIR}/externalpackages/mpich/install/lib -lmpi"
+export CXXFLAGS="-O2 -std=c++11 -L${ISSM_DIR}/externalpackages/mpich/install/lib -lmpi"
 
-#Untar ADOL-C
-tar -zxf  ADOL-C.tar.gz
+# Cleanup
+rm -rf install src
+mkdir install src
 
-#Compile ADOL-C
-export CFLAGS="-O2 -L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi"
-export CXXFLAGS="-O2 -std=c++11 -L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi"
+# Download source
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/ADOL-C.tar.gz" "ADOL-C.tar.gz"
+
+# Unpack source
+tar -zxvf ADOL-C.tar.gz
+
+# Configure
 cd src
-./configure --prefix=$ISSM_DIR/externalpackages/adolc/install  \
-	--libdir=$ISSM_DIR/externalpackages/adolc/install/lib \
-	--with-mpi-root=$ISSM_DIR/externalpackages/mpich/install \
+./configure \
+	--prefix=${ISSM_DIR}/externalpackages/adolc/install \
+	--libdir=${ISSM_DIR}/externalpackages/adolc/install/lib \
+	--with-mpi-root=${ISSM_DIR}/externalpackages/mpich/install \
 	--enable-ampi \
-	--with-ampi=$ISSM_DIR/externalpackages/adjoinablempi/install \
+	--with-ampi=${ISSM_DIR}/externalpackages/adjoinablempi/install \
 	--with-soname=adolc \
 	--disable-tapedoc-values
 
+# Clean
 make clean
+
+# Compile
 if [ $# -eq 0 ]; then
 	make V=1
@@ -32,3 +41,5 @@
 	make -j $1 V=1
 fi
+
+# Install
 make V=1 install
Index: /issm/trunk/externalpackages/autotools/install-debian-linux.sh
===================================================================
--- /issm/trunk/externalpackages/autotools/install-debian-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/autotools/install-debian-linux.sh	(revision 24686)
@@ -0,0 +1,74 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+AUTOCONF_VER="2.69"
+AUTOMAKE_VER="1.16.1"
+LIBTOOL_VER="2.4.2"
+M4_VER="1.4.18"
+
+## Environment
+#
+export PATH="${ISSM_DIR}/externalpackages/autotools/install/bin:$PATH"
+
+# Cleanup
+rm -rf install src
+mkdir install
+
+# Install m4
+echo " === INSTALLING M4 =="
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/m4-${M4_VER}.tar.gz" "m4-${M4_VER}.tar.gz"
+tar -zxvf m4-${M4_VER}.tar.gz
+mv m4-${M4_VER} src
+cd src
+
+## Fixes required by glibc-2.28
+#
+# Source: http://www.linuxfromscratch.org/lfs/view/development/chapter06/m4.html
+#
+sed -i 's/IO_ftrylockfile/IO_EOF_SEEN/' lib/*.c
+echo "#define _IO_IN_BACKUP 0x100" >> lib/stdio-impl.h
+
+./configure --prefix="${ISSM_DIR}/externalpackages/autotools/install"
+make
+make install
+cd ..
+
+# Install Autoconf
+echo " === INSTALLING AUTOCONF =="
+rm -rf src
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/autoconf-${AUTOCONF_VER}.tar.gz" "autoconf-${AUTOCONF_VER}.tar.gz"
+tar -zxvf autoconf-${AUTOCONF_VER}.tar.gz
+mv autoconf-${AUTOCONF_VER} src
+cd src
+./configure --prefix="${ISSM_DIR}/externalpackages/autotools/install"
+make
+make install
+cd ..
+
+# Install Automake
+echo " === INSTALLING AUTOMAKE =="
+rm -rf src
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/automake-${AUTOMAKE_VER}.tar.gz" "automake-${AUTOMAKE_VER}.tar.gz"
+tar -zxvf automake-${AUTOMAKE_VER}.tar.gz
+mv automake-${AUTOMAKE_VER} src
+cd src
+./configure --prefix="${ISSM_DIR}/externalpackages/autotools/install"
+make
+make install
+cd ..
+
+# Install libtool
+echo " === INSTALLING LIBTOOL =="
+rm -rf src
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/libtool-${LIBTOOL_VER}.tar.gz" "libtool-${LIBTOOL_VER}.tar.gz"
+tar -zxvf libtool-${LIBTOOL_VER}.tar.gz
+rm libtool-${LIBTOOL_VER}.tar.gz
+mv libtool-${LIBTOOL_VER} src
+cd src
+./configure --prefix="${ISSM_DIR}/externalpackages/autotools/install"
+make
+make install
+cd ..
Index: /issm/trunk/externalpackages/autotools/install.sh
===================================================================
--- /issm/trunk/externalpackages/autotools/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/autotools/install.sh	(revision 24686)
@@ -2,61 +2,65 @@
 set -eu
 
-#Version numbers
-M4_VER="1.4.17"
+
+## Constants
+#
 AUTOCONF_VER="2.69"
 AUTOMAKE_VER="1.16.1"
 LIBTOOL_VER="2.4.2"
+M4_VER="1.4.18"
 
-# Clean up existing directories
+## Environment
+#
+export PATH="${ISSM_DIR}/externalpackages/autotools/install/bin:$PATH"
+
+# Cleanup
 rm -rf install src
-
-# Set up for installation
 mkdir install
-export PATH="$ISSM_DIR/externalpackages/autotools/install/bin:$PATH"
 
 # Install m4
 echo " === INSTALLING M4 =="
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/m4-$M4_VER.tar.gz" "m4-$M4_VER.tar.gz"
-tar -zxvf m4-$M4_VER.tar.gz
-mv m4-$M4_VER src
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/m4-${M4_VER}.tar.gz" "m4-${M4_VER}.tar.gz"
+tar -zxvf m4-${M4_VER}.tar.gz
+mv m4-${M4_VER} src
 cd src
-./configure --prefix="$ISSM_DIR/externalpackages/autotools/install"
+
+./configure --prefix="${ISSM_DIR}/externalpackages/autotools/install"
 make
 make install
 cd ..
 
-#install autoconf
+# Install Autoconf
 echo " === INSTALLING AUTOCONF =="
 rm -rf src
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/autoconf-$AUTOCONF_VER.tar.gz" "autoconf-$AUTOCONF_VER.tar.gz"
-tar -zxvf autoconf-$AUTOCONF_VER.tar.gz
-mv autoconf-$AUTOCONF_VER src
-cd src 
-./configure --prefix="$ISSM_DIR/externalpackages/autotools/install" 
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/autoconf-${AUTOCONF_VER}.tar.gz" "autoconf-${AUTOCONF_VER}.tar.gz"
+tar -zxvf autoconf-${AUTOCONF_VER}.tar.gz
+mv autoconf-${AUTOCONF_VER} src
+cd src
+./configure --prefix="${ISSM_DIR}/externalpackages/autotools/install"
 make
 make install
 cd ..
 
-#install automake
+# Install Automake
 echo " === INSTALLING AUTOMAKE =="
 rm -rf src
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/automake-$AUTOMAKE_VER.tar.gz" "automake-$AUTOMAKE_VER.tar.gz"
-tar -zxvf  automake-$AUTOMAKE_VER.tar.gz
-mv automake-$AUTOMAKE_VER src
-cd src 
-./configure --prefix="$ISSM_DIR/externalpackages/autotools/install" 
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/automake-${AUTOMAKE_VER}.tar.gz" "automake-${AUTOMAKE_VER}.tar.gz"
+tar -zxvf automake-${AUTOMAKE_VER}.tar.gz
+mv automake-${AUTOMAKE_VER} src
+cd src
+./configure --prefix="${ISSM_DIR}/externalpackages/autotools/install"
 make
 make install
 cd ..
 
-#install libtool
+# Install libtool
 echo " === INSTALLING LIBTOOL =="
 rm -rf src
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/libtool-$LIBTOOL_VER.tar.gz" "libtool-$LIBTOOL_VER.tar.gz"
-tar -zxvf  libtool-$LIBTOOL_VER.tar.gz
-rm libtool-$LIBTOOL_VER.tar.gz
-mv libtool-$LIBTOOL_VER src
-cd src 
-./configure --prefix="$ISSM_DIR/externalpackages/autotools/install"
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/libtool-${LIBTOOL_VER}.tar.gz" "libtool-${LIBTOOL_VER}.tar.gz"
+tar -zxvf libtool-${LIBTOOL_VER}.tar.gz
+rm libtool-${LIBTOOL_VER}.tar.gz
+mv libtool-${LIBTOOL_VER} src
+cd src
+./configure --prefix="${ISSM_DIR}/externalpackages/autotools/install"
 make
 make install
Index: /issm/trunk/externalpackages/boost/configs/1.55/boost/multi_index/ordered_index.hpp
===================================================================
--- /issm/trunk/externalpackages/boost/configs/1.55/boost/multi_index/ordered_index.hpp	(revision 24686)
+++ /issm/trunk/externalpackages/boost/configs/1.55/boost/multi_index/ordered_index.hpp	(revision 24686)
@@ -0,0 +1,1529 @@
+/* Copyright 2003-2013 Joaquin M Lopez Munoz.
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * See http://www.boost.org/libs/multi_index for library home page.
+ *
+ * The internal implementation of red-black trees is based on that of SGI STL
+ * stl_tree.h file:
+ *
+ * Copyright (c) 1996,1997
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation.  Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose.  It is provided "as is" without express or implied warranty.
+ *
+ *
+ * Copyright (c) 1994
+ * Hewlett-Packard Company
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation.  Hewlett-Packard Company makes no
+ * representations about the suitability of this software for any
+ * purpose.  It is provided "as is" without express or implied warranty.
+ *
+ */
+
+#ifndef BOOST_MULTI_INDEX_ORDERED_INDEX_HPP
+#define BOOST_MULTI_INDEX_ORDERED_INDEX_HPP
+
+#if defined(_MSC_VER)&&(_MSC_VER>=1200)
+#pragma once
+#endif
+
+#include <boost/config.hpp> /* keep it first to prevent nasty warns in MSVC */
+#include <algorithm>
+#include <boost/call_traits.hpp>
+#include <boost/detail/no_exceptions_support.hpp>
+#include <boost/detail/workaround.hpp>
+#include <boost/foreach_fwd.hpp>
+#include <boost/iterator/reverse_iterator.hpp>
+#include <boost/move/core.hpp>
+#include <boost/mpl/bool.hpp>
+#include <boost/mpl/if.hpp>
+#include <boost/mpl/push_front.hpp>
+#include <boost/multi_index/detail/access_specifier.hpp>
+#include <boost/multi_index/detail/bidir_node_iterator.hpp>
+#include <boost/multi_index/detail/do_not_copy_elements_tag.hpp>
+#include <boost/multi_index/detail/index_node_base.hpp>
+#include <boost/multi_index/detail/modify_key_adaptor.hpp>
+#include <boost/multi_index/detail/ord_index_node.hpp>
+#include <boost/multi_index/detail/ord_index_ops.hpp>
+#include <boost/multi_index/detail/safe_ctr_proxy.hpp>
+#include <boost/multi_index/detail/safe_mode.hpp>
+#include <boost/multi_index/detail/scope_guard.hpp>
+#include <boost/multi_index/detail/unbounded.hpp>
+#include <boost/multi_index/detail/value_compare.hpp>
+#include <boost/multi_index/detail/vartempl_support.hpp>
+#include <boost/multi_index/ordered_index_fwd.hpp>
+#include <boost/ref.hpp>
+#include <boost/tuple/tuple.hpp>
+#include <boost/type_traits/is_same.hpp>
+#include <utility>
+
+#if !defined(BOOST_NO_CXX11_HDR_INITIALIZER_LIST)
+#include <initializer_list>
+#endif
+
+#if !defined(BOOST_MULTI_INDEX_DISABLE_SERIALIZATION)
+#include <boost/archive/archive_exception.hpp>
+#include <boost/bind.hpp>
+#include <boost/multi_index/detail/duplicates_iterator.hpp>
+#include <boost/throw_exception.hpp>
+#endif
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_INVARIANT_CHECKING)
+#define BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT_OF(x)                    \
+  detail::scope_guard BOOST_JOIN(check_invariant_,__LINE__)=                 \
+    detail::make_obj_guard(x,&ordered_index::check_invariant_);              \
+  BOOST_JOIN(check_invariant_,__LINE__).touch();
+#define BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT                          \
+  BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT_OF(*this)
+#else
+#define BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT_OF(x)
+#define BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT
+#endif
+
+namespace boost{
+
+namespace multi_index{
+
+namespace detail{
+
+/* ordered_index adds a layer of ordered indexing to a given Super */
+
+/* Most of the implementation of unique and non-unique indices is
+ * shared. We tell from one another on instantiation time by using
+ * these tags.
+ */
+
+struct ordered_unique_tag{};
+struct ordered_non_unique_tag{};
+
+template<
+  typename KeyFromValue,typename Compare,
+  typename SuperMeta,typename TagList,typename Category
+>
+class ordered_index:
+  BOOST_MULTI_INDEX_PROTECTED_IF_MEMBER_TEMPLATE_FRIENDS SuperMeta::type
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+#if BOOST_WORKAROUND(BOOST_MSVC,<1300)
+  ,public safe_ctr_proxy_impl<
+    bidir_node_iterator<
+      ordered_index_node<typename SuperMeta::type::node_type> >,
+    ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category> >
+#else
+  ,public safe_mode::safe_container<
+    ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category> >
+#endif
+#endif
+
+{
+#if defined(BOOST_MULTI_INDEX_ENABLE_INVARIANT_CHECKING)&&\
+    BOOST_WORKAROUND(__MWERKS__,<=0x3003)
+/* The "ISO C++ Template Parser" option in CW8.3 has a problem with the
+ * lifetime of const references bound to temporaries --precisely what
+ * scopeguards are.
+ */
+
+#pragma parse_mfunc_templ off
+#endif
+
+  typedef typename SuperMeta::type                   super;
+
+protected:
+  typedef ordered_index_node<
+    typename super::node_type>                       node_type;
+
+private:
+  typedef typename node_type::impl_type              node_impl_type;
+  typedef typename node_impl_type::pointer           node_impl_pointer;
+
+public:
+  /* types */
+
+  typedef typename KeyFromValue::result_type         key_type;
+  typedef typename node_type::value_type             value_type;
+  typedef KeyFromValue                               key_from_value;
+  typedef Compare                                    key_compare;
+  typedef value_comparison<
+    value_type,KeyFromValue,Compare>                 value_compare;
+  typedef tuple<key_from_value,key_compare>          ctor_args;
+  typedef typename super::final_allocator_type       allocator_type;
+  typedef typename allocator_type::reference         reference;
+  typedef typename allocator_type::const_reference   const_reference;
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+#if BOOST_WORKAROUND(BOOST_MSVC,<1300)
+  typedef safe_mode::safe_iterator<
+    bidir_node_iterator<node_type>,
+    safe_ctr_proxy<
+      bidir_node_iterator<node_type> > >             iterator;
+#else
+  typedef safe_mode::safe_iterator<
+    bidir_node_iterator<node_type>,
+    ordered_index>                                   iterator;
+#endif
+#else
+  typedef bidir_node_iterator<node_type>             iterator;
+#endif
+
+  typedef iterator                                   const_iterator;
+
+  typedef std::size_t                                size_type;
+  typedef std::ptrdiff_t                             difference_type;
+  typedef typename allocator_type::pointer           pointer;
+  typedef typename allocator_type::const_pointer     const_pointer;
+  typedef typename
+    boost::reverse_iterator<iterator>                reverse_iterator;
+  typedef typename
+    boost::reverse_iterator<const_iterator>          const_reverse_iterator;
+  typedef TagList                                    tag_list;
+
+protected:
+  typedef typename super::final_node_type            final_node_type;
+  typedef tuples::cons<
+    ctor_args,
+    typename super::ctor_args_list>                  ctor_args_list;
+  typedef typename mpl::push_front<
+    typename super::index_type_list,
+    ordered_index>::type                             index_type_list;
+  typedef typename mpl::push_front<
+    typename super::iterator_type_list,
+    iterator>::type    iterator_type_list;
+  typedef typename mpl::push_front<
+    typename super::const_iterator_type_list,
+    const_iterator>::type                            const_iterator_type_list;
+  typedef typename super::copy_map_type              copy_map_type;
+
+#if !defined(BOOST_MULTI_INDEX_DISABLE_SERIALIZATION)
+  typedef typename super::index_saver_type           index_saver_type;
+  typedef typename super::index_loader_type          index_loader_type;
+#endif
+
+private:
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+#if BOOST_WORKAROUND(BOOST_MSVC,<1300)
+  typedef safe_ctr_proxy_impl<
+    bidir_node_iterator<node_type>,
+    ordered_index>                                   safe_super;
+#else
+  typedef safe_mode::safe_container<ordered_index>   safe_super;
+#endif
+#endif
+
+  typedef typename call_traits<
+    value_type>::param_type                          value_param_type;
+  typedef typename call_traits<
+    key_type>::param_type                            key_param_type;
+
+  /* Needed to avoid commas in BOOST_MULTI_INDEX_OVERLOADS_TO_VARTEMPL
+   * expansion.
+   */
+
+  typedef std::pair<iterator,bool>                   emplace_return_type;
+
+public:
+
+  /* construct/copy/destroy
+   * Default and copy ctors are in the protected section as indices are
+   * not supposed to be created on their own. No range ctor either.
+   */
+
+  ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& operator=(
+    const ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& x)
+  {
+    this->final()=x.final();
+    return *this;
+  }
+
+#if !defined(BOOST_NO_CXX11_HDR_INITIALIZER_LIST)
+  ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& operator=(
+    std::initializer_list<value_type> list)
+  {
+    this->final()=list;
+    return *this;
+  }
+#endif
+
+  allocator_type get_allocator()const
+  {
+    return this->final().get_allocator();
+  }
+
+  /* iterators */
+
+  iterator               begin(){return make_iterator(leftmost());}
+  const_iterator         begin()const{return make_iterator(leftmost());}
+  iterator               end(){return make_iterator(header());}
+  const_iterator         end()const{return make_iterator(header());}
+  reverse_iterator       rbegin(){return boost::make_reverse_iterator(end());}
+  const_reverse_iterator rbegin()const{return make_reverse_iterator(end());}
+  reverse_iterator       rend(){return make_reverse_iterator(begin());}
+  const_reverse_iterator rend()const{return make_reverse_iterator(begin());}
+  const_iterator         cbegin()const{return begin();}
+  const_iterator         cend()const{return end();}
+  const_reverse_iterator crbegin()const{return rbegin();}
+  const_reverse_iterator crend()const{return rend();}
+
+  iterator iterator_to(const value_type& x)
+  {
+    return make_iterator(node_from_value<node_type>(&x));
+  }
+
+  const_iterator iterator_to(const value_type& x)const
+  {
+    return make_iterator(node_from_value<node_type>(&x));
+  }
+
+  /* capacity */
+
+  bool      empty()const{return this->final_empty_();}
+  size_type size()const{return this->final_size_();}
+  size_type max_size()const{return this->final_max_size_();}
+
+  /* modifiers */
+
+  BOOST_MULTI_INDEX_OVERLOADS_TO_VARTEMPL(
+    emplace_return_type,emplace,emplace_impl)
+
+  BOOST_MULTI_INDEX_OVERLOADS_TO_VARTEMPL_EXTRA_ARG(
+    iterator,emplace_hint,emplace_hint_impl,iterator,position)
+
+  std::pair<iterator,bool> insert(const value_type& x)
+  {
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    std::pair<final_node_type*,bool> p=this->final_insert_(x);
+    return std::pair<iterator,bool>(make_iterator(p.first),p.second);
+  }
+
+  std::pair<iterator,bool> insert(BOOST_RV_REF(value_type) x)
+  {
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    std::pair<final_node_type*,bool> p=this->final_insert_rv_(x);
+    return std::pair<iterator,bool>(make_iterator(p.first),p.second);
+  }
+
+  iterator insert(iterator position,const value_type& x)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    std::pair<final_node_type*,bool> p=this->final_insert_(
+      x,static_cast<final_node_type*>(position.get_node()));
+    return make_iterator(p.first);
+  }
+
+  iterator insert(iterator position,BOOST_RV_REF(value_type) x)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    std::pair<final_node_type*,bool> p=this->final_insert_rv_(
+      x,static_cast<final_node_type*>(position.get_node()));
+    return make_iterator(p.first);
+  }
+
+  template<typename InputIterator>
+  void insert(InputIterator first,InputIterator last)
+  {
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    node_type* hint=header(); /* end() */
+    for(;first!=last;++first){
+      hint=this->final_insert_ref_(
+        *first,static_cast<final_node_type*>(hint)).first;
+      node_type::increment(hint);
+    }
+  }
+
+#if !defined(BOOST_NO_CXX11_HDR_INITIALIZER_LIST)
+  void insert(std::initializer_list<value_type> list)
+  {
+    insert(list.begin(),list.end());
+  }
+#endif
+
+  iterator erase(iterator position)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_DEREFERENCEABLE_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    this->final_erase_(static_cast<final_node_type*>(position++.get_node()));
+    return position;
+  }
+
+  size_type erase(key_param_type x)
+  {
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    std::pair<iterator,iterator> p=equal_range(x);
+    size_type s=0;
+    while(p.first!=p.second){
+      p.first=erase(p.first);
+      ++s;
+    }
+    return s;
+  }
+
+  iterator erase(iterator first,iterator last)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(first);
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(last);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(first,*this);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(last,*this);
+    BOOST_MULTI_INDEX_CHECK_VALID_RANGE(first,last);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    while(first!=last){
+      first=erase(first);
+    }
+    return first;
+  }
+
+  bool replace(iterator position,const value_type& x)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_DEREFERENCEABLE_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    return this->final_replace_(
+      x,static_cast<final_node_type*>(position.get_node()));
+  }
+
+  bool replace(iterator position,BOOST_RV_REF(value_type) x)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_DEREFERENCEABLE_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    return this->final_replace_rv_(
+      x,static_cast<final_node_type*>(position.get_node()));
+  }
+
+  template<typename Modifier>
+  bool modify(iterator position,Modifier mod)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_DEREFERENCEABLE_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+    /* MSVC++ 6.0 optimizer on safe mode code chokes if this
+     * this is not added. Left it for all compilers as it does no
+     * harm.
+     */
+
+    position.detach();
+#endif
+
+    return this->final_modify_(
+      mod,static_cast<final_node_type*>(position.get_node()));
+  }
+
+  template<typename Modifier,typename Rollback>
+  bool modify(iterator position,Modifier mod,Rollback back)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_DEREFERENCEABLE_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+    /* MSVC++ 6.0 optimizer on safe mode code chokes if this
+     * this is not added. Left it for all compilers as it does no
+     * harm.
+     */
+
+    position.detach();
+#endif
+
+    return this->final_modify_(
+      mod,back,static_cast<final_node_type*>(position.get_node()));
+  }
+
+  template<typename Modifier>
+  bool modify_key(iterator position,Modifier mod)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_DEREFERENCEABLE_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    return modify(
+      position,modify_key_adaptor<Modifier,value_type,KeyFromValue>(mod,key));
+  }
+
+  template<typename Modifier,typename Rollback>
+  bool modify_key(iterator position,Modifier mod,Rollback back)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_DEREFERENCEABLE_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    return modify(
+      position,
+      modify_key_adaptor<Modifier,value_type,KeyFromValue>(mod,key),
+      modify_key_adaptor<Rollback,value_type,KeyFromValue>(back,key));
+  }
+
+  void swap(ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& x)
+  {
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT_OF(x);
+    this->final_swap_(x.final());
+  }
+
+  void clear()
+  {
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    this->final_clear_();
+  }
+
+  /* observers */
+
+  key_from_value key_extractor()const{return key;}
+  key_compare    key_comp()const{return comp_;}
+  value_compare  value_comp()const{return value_compare(key,comp_);}
+
+  /* set operations */
+
+  /* Internally, these ops rely on const_iterator being the same
+   * type as iterator.
+   */
+
+  template<typename CompatibleKey>
+  iterator find(const CompatibleKey& x)const
+  {
+    return make_iterator(ordered_index_find(root(),header(),key,x,comp_));
+  }
+
+  template<typename CompatibleKey,typename CompatibleCompare>
+  iterator find(
+    const CompatibleKey& x,const CompatibleCompare& comp)const
+  {
+    return make_iterator(ordered_index_find(root(),header(),key,x,comp));
+  }
+
+  template<typename CompatibleKey>
+  size_type count(const CompatibleKey& x)const
+  {
+    return count(x,comp_);
+  }
+
+  template<typename CompatibleKey,typename CompatibleCompare>
+  size_type count(const CompatibleKey& x,const CompatibleCompare& comp)const
+  {
+    std::pair<iterator,iterator> p=equal_range(x,comp);
+    size_type n=std::distance(p.first,p.second);
+    return n;
+  }
+
+  template<typename CompatibleKey>
+  iterator lower_bound(const CompatibleKey& x)const
+  {
+    return make_iterator(
+      ordered_index_lower_bound(root(),header(),key,x,comp_));
+  }
+
+  template<typename CompatibleKey,typename CompatibleCompare>
+  iterator lower_bound(
+    const CompatibleKey& x,const CompatibleCompare& comp)const
+  {
+    return make_iterator(
+      ordered_index_lower_bound(root(),header(),key,x,comp));
+  }
+
+  template<typename CompatibleKey>
+  iterator upper_bound(const CompatibleKey& x)const
+  {
+    return make_iterator(
+      ordered_index_upper_bound(root(),header(),key,x,comp_));
+  }
+
+  template<typename CompatibleKey,typename CompatibleCompare>
+  iterator upper_bound(
+    const CompatibleKey& x,const CompatibleCompare& comp)const
+  {
+    return make_iterator(
+      ordered_index_upper_bound(root(),header(),key,x,comp));
+  }
+
+  template<typename CompatibleKey>
+  std::pair<iterator,iterator> equal_range(
+    const CompatibleKey& x)const
+  {
+    std::pair<node_type*,node_type*> p=
+      ordered_index_equal_range(root(),header(),key,x,comp_);
+    return std::pair<iterator,iterator>(
+      make_iterator(p.first),make_iterator(p.second));
+  }
+
+  template<typename CompatibleKey,typename CompatibleCompare>
+  std::pair<iterator,iterator> equal_range(
+    const CompatibleKey& x,const CompatibleCompare& comp)const
+  {
+    std::pair<node_type*,node_type*> p=
+      ordered_index_equal_range(root(),header(),key,x,comp);
+    return std::pair<iterator,iterator>(
+      make_iterator(p.first),make_iterator(p.second));
+  }
+
+  /* range */
+
+  template<typename LowerBounder,typename UpperBounder>
+  std::pair<iterator,iterator>
+  range(LowerBounder lower,UpperBounder upper)const
+  {
+    typedef typename mpl::if_<
+      is_same<LowerBounder,unbounded_type>,
+      BOOST_DEDUCED_TYPENAME mpl::if_<
+        is_same<UpperBounder,unbounded_type>,
+        both_unbounded_tag,
+        lower_unbounded_tag
+      >::type,
+      BOOST_DEDUCED_TYPENAME mpl::if_<
+        is_same<UpperBounder,unbounded_type>,
+        upper_unbounded_tag,
+        none_unbounded_tag
+      >::type
+    >::type dispatch;
+
+    return range(lower,upper,dispatch());
+  }
+
+BOOST_MULTI_INDEX_PROTECTED_IF_MEMBER_TEMPLATE_FRIENDS:
+  ordered_index(const ctor_args_list& args_list,const allocator_type& al):
+    super(args_list.get_tail(),al),
+    key(tuples::get<0>(args_list.get_head())),
+    comp_(tuples::get<1>(args_list.get_head()))
+  {
+    empty_initialize();
+  }
+
+  ordered_index(
+    const ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& x):
+    super(x),
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+    safe_super(),
+#endif
+
+    key(x.key),
+    comp_(x.comp_)
+  {
+    /* Copy ctor just takes the key and compare objects from x. The rest is
+     * done in a subsequent call to copy_().
+     */
+  }
+
+  ordered_index(
+     const ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& x,
+     do_not_copy_elements_tag):
+    super(x,do_not_copy_elements_tag()),
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+    safe_super(),
+#endif
+
+    key(x.key),
+    comp_(x.comp_)
+  {
+    empty_initialize();
+  }
+
+  ~ordered_index()
+  {
+    /* the container is guaranteed to be empty by now */
+  }
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+  iterator       make_iterator(node_type* node){return iterator(node,this);}
+  const_iterator make_iterator(node_type* node)const
+    {return const_iterator(node,const_cast<ordered_index*>(this));}
+#else
+  iterator       make_iterator(node_type* node){return iterator(node);}
+  const_iterator make_iterator(node_type* node)const
+                   {return const_iterator(node);}
+#endif
+
+  void copy_(
+    const ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& x,
+    const copy_map_type& map)
+  {
+    if(!x.root()){
+      empty_initialize();
+    }
+    else{
+      header()->color()=x.header()->color();
+
+      node_type* root_cpy=map.find(static_cast<final_node_type*>(x.root()));
+      header()->parent()=root_cpy->impl();
+
+      node_type* leftmost_cpy=map.find(
+        static_cast<final_node_type*>(x.leftmost()));
+      header()->left()=leftmost_cpy->impl();
+
+      node_type* rightmost_cpy=map.find(
+        static_cast<final_node_type*>(x.rightmost()));
+      header()->right()=rightmost_cpy->impl();
+
+      typedef typename copy_map_type::const_iterator copy_map_iterator;
+      for(copy_map_iterator it=map.begin(),it_end=map.end();it!=it_end;++it){
+        node_type* org=it->first;
+        node_type* cpy=it->second;
+
+        cpy->color()=org->color();
+
+        node_impl_pointer parent_org=org->parent();
+        if(parent_org==node_impl_pointer(0))cpy->parent()=node_impl_pointer(0);
+        else{
+          node_type* parent_cpy=map.find(
+            static_cast<final_node_type*>(node_type::from_impl(parent_org)));
+          cpy->parent()=parent_cpy->impl();
+          if(parent_org->left()==org->impl()){
+            parent_cpy->left()=cpy->impl();
+          }
+          else if(parent_org->right()==org->impl()){
+            /* header() does not satisfy this nor the previous check */
+            parent_cpy->right()=cpy->impl();
+          }
+        }
+
+        if(org->left()==node_impl_pointer(0))
+          cpy->left()=node_impl_pointer(0);
+        if(org->right()==node_impl_pointer(0))
+          cpy->right()=node_impl_pointer(0);
+      }
+    }
+
+    super::copy_(x,map);
+  }
+
+  template<typename Variant>
+  node_type* insert_(value_param_type v,node_type* x,Variant variant)
+  {
+    link_info inf;
+    if(!link_point(key(v),inf,Category())){
+      return node_type::from_impl(inf.pos);
+    }
+
+    node_type* res=static_cast<node_type*>(super::insert_(v,x,variant));
+    if(res==x){
+      node_impl_type::link(x->impl(),inf.side,inf.pos,header()->impl());
+    }
+    return res;
+  }
+
+  template<typename Variant>
+  node_type* insert_(
+    value_param_type v,node_type* position,node_type* x,Variant variant)
+  {
+    link_info inf;
+    if(!hinted_link_point(key(v),position,inf,Category())){
+      return node_type::from_impl(inf.pos);
+    }
+
+    node_type* res=static_cast<node_type*>(super::insert_(v,position,x,variant));
+    if(res==x){
+      node_impl_type::link(x->impl(),inf.side,inf.pos,header()->impl());
+    }
+    return res;
+  }
+
+  void erase_(node_type* x)
+  {
+    node_impl_type::rebalance_for_erase(
+      x->impl(),header()->parent(),header()->left(),header()->right());
+    super::erase_(x);
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+    detach_iterators(x);
+#endif
+  }
+
+  void delete_all_nodes_()
+  {
+    delete_all_nodes(root());
+  }
+
+  void clear_()
+  {
+    super::clear_();
+    empty_initialize();
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+    safe_super::detach_dereferenceable_iterators();
+#endif
+  }
+
+  void swap_(ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& x)
+  {
+    std::swap(key,x.key);
+    std::swap(comp_,x.comp_);
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+    safe_super::swap(x);
+#endif
+
+    super::swap_(x);
+  }
+
+  void swap_elements_(
+    ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& x)
+  {
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+    safe_super::swap(x);
+#endif
+
+    super::swap_elements_(x);
+  }
+
+  template<typename Variant>
+  bool replace_(value_param_type v,node_type* x,Variant variant)
+  {
+    if(in_place(v,x,Category())){
+      return super::replace_(v,x,variant);
+    }
+
+    node_type* next=x;
+    node_type::increment(next);
+
+    node_impl_type::rebalance_for_erase(
+      x->impl(),header()->parent(),header()->left(),header()->right());
+
+    BOOST_TRY{
+      link_info inf;
+      if(link_point(key(v),inf,Category())&&super::replace_(v,x,variant)){
+        node_impl_type::link(x->impl(),inf.side,inf.pos,header()->impl());
+        return true;
+      }
+      node_impl_type::restore(x->impl(),next->impl(),header()->impl());
+      return false;
+    }
+    BOOST_CATCH(...){
+      node_impl_type::restore(x->impl(),next->impl(),header()->impl());
+      BOOST_RETHROW;
+    }
+    BOOST_CATCH_END
+  }
+
+  bool modify_(node_type* x)
+  {
+    bool b;
+    BOOST_TRY{
+      b=in_place(x->value(),x,Category());
+    }
+    BOOST_CATCH(...){
+      erase_(x);
+      BOOST_RETHROW;
+    }
+    BOOST_CATCH_END
+    if(!b){
+      node_impl_type::rebalance_for_erase(
+        x->impl(),header()->parent(),header()->left(),header()->right());
+      BOOST_TRY{
+        link_info inf;
+        if(!link_point(key(x->value()),inf,Category())){
+          super::erase_(x);
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+          detach_iterators(x);
+#endif
+          return false;
+        }
+        node_impl_type::link(x->impl(),inf.side,inf.pos,header()->impl());
+      }
+      BOOST_CATCH(...){
+        super::erase_(x);
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+        detach_iterators(x);
+#endif
+
+        BOOST_RETHROW;
+      }
+      BOOST_CATCH_END
+    }
+
+    BOOST_TRY{
+      if(!super::modify_(x)){
+        node_impl_type::rebalance_for_erase(
+          x->impl(),header()->parent(),header()->left(),header()->right());
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+        detach_iterators(x);
+#endif
+
+        return false;
+      }
+      else return true;
+    }
+    BOOST_CATCH(...){
+      node_impl_type::rebalance_for_erase(
+        x->impl(),header()->parent(),header()->left(),header()->right());
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+      detach_iterators(x);
+#endif
+
+      BOOST_RETHROW;
+    }
+    BOOST_CATCH_END
+  }
+
+  bool modify_rollback_(node_type* x)
+  {
+    if(in_place(x->value(),x,Category())){
+      return super::modify_rollback_(x);
+    }
+
+    node_type* next=x;
+    node_type::increment(next);
+
+    node_impl_type::rebalance_for_erase(
+      x->impl(),header()->parent(),header()->left(),header()->right());
+
+    BOOST_TRY{
+      link_info inf;
+      if(link_point(key(x->value()),inf,Category())&&
+         super::modify_rollback_(x)){
+        node_impl_type::link(x->impl(),inf.side,inf.pos,header()->impl());
+        return true;
+      }
+      node_impl_type::restore(x->impl(),next->impl(),header()->impl());
+      return false;
+    }
+    BOOST_CATCH(...){
+      node_impl_type::restore(x->impl(),next->impl(),header()->impl());
+      BOOST_RETHROW;
+    }
+    BOOST_CATCH_END
+  }
+
+#if !defined(BOOST_MULTI_INDEX_DISABLE_SERIALIZATION)
+  /* serialization */
+
+  template<typename Archive>
+  void save_(
+    Archive& ar,const unsigned int version,const index_saver_type& sm)const
+  {
+    save_(ar,version,sm,Category());
+  }
+
+  template<typename Archive>
+  void load_(Archive& ar,const unsigned int version,const index_loader_type& lm)
+  {
+    load_(ar,version,lm,Category());
+  }
+#endif
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_INVARIANT_CHECKING)
+  /* invariant stuff */
+
+  bool invariant_()const
+  {
+    if(size()==0||begin()==end()){
+      if(size()!=0||begin()!=end()||
+         header()->left()!=header()->impl()||
+         header()->right()!=header()->impl())return false;
+    }
+    else{
+      if((size_type)std::distance(begin(),end())!=size())return false;
+
+      std::size_t len=node_impl_type::black_count(
+        leftmost()->impl(),root()->impl());
+      for(const_iterator it=begin(),it_end=end();it!=it_end;++it){
+        node_type* x=it.get_node();
+        node_type* left_x=node_type::from_impl(x->left());
+        node_type* right_x=node_type::from_impl(x->right());
+
+        if(x->color()==red){
+          if((left_x&&left_x->color()==red)||
+             (right_x&&right_x->color()==red))return false;
+        }
+        if(left_x&&comp_(key(x->value()),key(left_x->value())))return false;
+        if(right_x&&comp_(key(right_x->value()),key(x->value())))return false;
+        if(!left_x&&!right_x&&
+           node_impl_type::black_count(x->impl(),root()->impl())!=len)
+          return false;
+      }
+
+      if(leftmost()->impl()!=node_impl_type::minimum(root()->impl()))
+        return false;
+      if(rightmost()->impl()!=node_impl_type::maximum(root()->impl()))
+        return false;
+    }
+
+    return super::invariant_();
+  }
+
+
+  /* This forwarding function eases things for the boost::mem_fn construct
+   * in BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT. Actually,
+   * final_check_invariant is already an inherited member function of
+   * ordered_index.
+   */
+  void check_invariant_()const{this->final_check_invariant_();}
+#endif
+
+private:
+  node_type* header()const{return this->final_header();}
+  node_type* root()const{return node_type::from_impl(header()->parent());}
+  node_type* leftmost()const{return node_type::from_impl(header()->left());}
+  node_type* rightmost()const{return node_type::from_impl(header()->right());}
+
+  void empty_initialize()
+  {
+    header()->color()=red;
+    /* used to distinguish header() from root, in iterator.operator++ */
+
+    header()->parent()=node_impl_pointer(0);
+    header()->left()=header()->impl();
+    header()->right()=header()->impl();
+  }
+
+  struct link_info
+  {
+    link_info():side(to_left){}
+
+    ordered_index_side side;
+    node_impl_pointer  pos;
+  };
+
+  bool link_point(key_param_type k,link_info& inf,ordered_unique_tag)
+  {
+    node_type* y=header();
+    node_type* x=root();
+    bool c=true;
+    while(x){
+      y=x;
+      c=comp_(k,key(x->value()));
+      x=node_type::from_impl(c?x->left():x->right());
+    }
+    node_type* yy=y;
+    if(c){
+      if(yy==leftmost()){
+        inf.side=to_left;
+        inf.pos=y->impl();
+        return true;
+      }
+      else node_type::decrement(yy);
+    }
+
+    if(comp_(key(yy->value()),k)){
+      inf.side=c?to_left:to_right;
+      inf.pos=y->impl();
+      return true;
+    }
+    else{
+      inf.pos=yy->impl();
+      return false;
+    }
+  }
+
+  bool link_point(key_param_type k,link_info& inf,ordered_non_unique_tag)
+  {
+    node_type* y=header();
+    node_type* x=root();
+    bool c=true;
+    while (x){
+     y=x;
+     c=comp_(k,key(x->value()));
+     x=node_type::from_impl(c?x->left():x->right());
+    }
+    inf.side=c?to_left:to_right;
+    inf.pos=y->impl();
+    return true;
+  }
+
+  bool lower_link_point(key_param_type k,link_info& inf,ordered_non_unique_tag)
+  {
+    node_type* y=header();
+    node_type* x=root();
+    bool c=false;
+    while (x){
+     y=x;
+     c=comp_(key(x->value()),k);
+     x=node_type::from_impl(c?x->right():x->left());
+    }
+    inf.side=c?to_right:to_left;
+    inf.pos=y->impl();
+    return true;
+  }
+
+  bool hinted_link_point(
+    key_param_type k,node_type* position,link_info& inf,ordered_unique_tag)
+  {
+    if(position->impl()==header()->left()){
+      if(size()>0&&comp_(k,key(position->value()))){
+        inf.side=to_left;
+        inf.pos=position->impl();
+        return true;
+      }
+      else return link_point(k,inf,ordered_unique_tag());
+    }
+    else if(position==header()){
+      if(comp_(key(rightmost()->value()),k)){
+        inf.side=to_right;
+        inf.pos=rightmost()->impl();
+        return true;
+      }
+      else return link_point(k,inf,ordered_unique_tag());
+    }
+    else{
+      node_type* before=position;
+      node_type::decrement(before);
+      if(comp_(key(before->value()),k)&&comp_(k,key(position->value()))){
+        if(before->right()==node_impl_pointer(0)){
+          inf.side=to_right;
+          inf.pos=before->impl();
+          return true;
+        }
+        else{
+          inf.side=to_left;
+          inf.pos=position->impl();
+          return true;
+        }
+      }
+      else return link_point(k,inf,ordered_unique_tag());
+    }
+  }
+
+  bool hinted_link_point(
+    key_param_type k,node_type* position,link_info& inf,ordered_non_unique_tag)
+  {
+    if(position->impl()==header()->left()){
+      if(size()>0&&!comp_(key(position->value()),k)){
+        inf.side=to_left;
+        inf.pos=position->impl();
+        return true;
+      }
+      else return lower_link_point(k,inf,ordered_non_unique_tag());
+    }
+    else if(position==header()){
+      if(!comp_(k,key(rightmost()->value()))){
+        inf.side=to_right;
+        inf.pos=rightmost()->impl();
+        return true;
+      }
+      else return link_point(k,inf,ordered_non_unique_tag());
+    }
+    else{
+      node_type* before=position;
+      node_type::decrement(before);
+      if(!comp_(k,key(before->value()))){
+        if(!comp_(key(position->value()),k)){
+          if(before->right()==node_impl_pointer(0)){
+            inf.side=to_right;
+            inf.pos=before->impl();
+            return true;
+          }
+          else{
+            inf.side=to_left;
+            inf.pos=position->impl();
+            return true;
+          }
+        }
+        else return lower_link_point(k,inf,ordered_non_unique_tag());
+      }
+      else return link_point(k,inf,ordered_non_unique_tag());
+    }
+  }
+
+  void delete_all_nodes(node_type* x)
+  {
+    if(!x)return;
+
+    delete_all_nodes(node_type::from_impl(x->left()));
+    delete_all_nodes(node_type::from_impl(x->right()));
+    this->final_delete_node_(static_cast<final_node_type*>(x));
+  }
+
+  bool in_place(value_param_type v,node_type* x,ordered_unique_tag)
+  {
+    node_type* y;
+    if(x!=leftmost()){
+      y=x;
+      node_type::decrement(y);
+      if(!comp_(key(y->value()),key(v)))return false;
+    }
+
+    y=x;
+    node_type::increment(y);
+    return y==header()||comp_(key(v),key(y->value()));
+  }
+
+  bool in_place(value_param_type v,node_type* x,ordered_non_unique_tag)
+  {
+    node_type* y;
+    if(x!=leftmost()){
+      y=x;
+      node_type::decrement(y);
+      if(comp_(key(v),key(y->value())))return false;
+    }
+
+    y=x;
+    node_type::increment(y);
+    return y==header()||!comp_(key(y->value()),key(v));
+  }
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_SAFE_MODE)
+  void detach_iterators(node_type* x)
+  {
+    iterator it=make_iterator(x);
+    safe_mode::detach_equivalent_iterators(it);
+  }
+#endif
+
+  template<BOOST_MULTI_INDEX_TEMPLATE_PARAM_PACK>
+  std::pair<iterator,bool> emplace_impl(BOOST_MULTI_INDEX_FUNCTION_PARAM_PACK)
+  {
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    std::pair<final_node_type*,bool>p=
+      this->final_emplace_(BOOST_MULTI_INDEX_FORWARD_PARAM_PACK);
+    return std::pair<iterator,bool>(make_iterator(p.first),p.second);
+  }
+
+  template<BOOST_MULTI_INDEX_TEMPLATE_PARAM_PACK>
+  iterator emplace_hint_impl(
+    iterator position,BOOST_MULTI_INDEX_FUNCTION_PARAM_PACK)
+  {
+    BOOST_MULTI_INDEX_CHECK_VALID_ITERATOR(position);
+    BOOST_MULTI_INDEX_CHECK_IS_OWNER(position,*this);
+    BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT;
+    std::pair<final_node_type*,bool>p=
+      this->final_emplace_hint_(
+        static_cast<final_node_type*>(position.get_node()),
+        BOOST_MULTI_INDEX_FORWARD_PARAM_PACK);
+    return make_iterator(p.first);
+  }
+
+  template<typename LowerBounder,typename UpperBounder>
+  std::pair<iterator,iterator>
+  range(LowerBounder lower,UpperBounder upper,none_unbounded_tag)const
+  {
+    node_type* y=header();
+    node_type* z=root();
+
+    while(z){
+      if(!lower(key(z->value()))){
+        z=node_type::from_impl(z->right());
+      }
+      else if(!upper(key(z->value()))){
+        y=z;
+        z=node_type::from_impl(z->left());
+      }
+      else{
+        return std::pair<iterator,iterator>(
+          make_iterator(
+            lower_range(node_type::from_impl(z->left()),z,lower)),
+          make_iterator(
+            upper_range(node_type::from_impl(z->right()),y,upper)));
+      }
+    }
+
+    return std::pair<iterator,iterator>(make_iterator(y),make_iterator(y));
+  }
+
+  template<typename LowerBounder,typename UpperBounder>
+  std::pair<iterator,iterator>
+  range(LowerBounder,UpperBounder upper,lower_unbounded_tag)const
+  {
+    return std::pair<iterator,iterator>(
+      begin(),
+      make_iterator(upper_range(root(),header(),upper)));
+  }
+
+  template<typename LowerBounder,typename UpperBounder>
+  std::pair<iterator,iterator>
+  range(LowerBounder lower,UpperBounder,upper_unbounded_tag)const
+  {
+    return std::pair<iterator,iterator>(
+      make_iterator(lower_range(root(),header(),lower)),
+      end());
+  }
+
+  template<typename LowerBounder,typename UpperBounder>
+  std::pair<iterator,iterator>
+  range(LowerBounder,UpperBounder,both_unbounded_tag)const
+  {
+    return std::pair<iterator,iterator>(begin(),end());
+  }
+
+  template<typename LowerBounder>
+  node_type * lower_range(node_type* top,node_type* y,LowerBounder lower)const
+  {
+    while(top){
+      if(lower(key(top->value()))){
+        y=top;
+        top=node_type::from_impl(top->left());
+      }
+      else top=node_type::from_impl(top->right());
+    }
+
+    return y;
+  }
+
+  template<typename UpperBounder>
+  node_type * upper_range(node_type* top,node_type* y,UpperBounder upper)const
+  {
+    while(top){
+      if(!upper(key(top->value()))){
+        y=top;
+        top=node_type::from_impl(top->left());
+      }
+      else top=node_type::from_impl(top->right());
+    }
+
+    return y;
+  }
+
+#if !defined(BOOST_MULTI_INDEX_DISABLE_SERIALIZATION)
+  template<typename Archive>
+  void save_(
+    Archive& ar,const unsigned int version,const index_saver_type& sm,
+    ordered_unique_tag)const
+  {
+    super::save_(ar,version,sm);
+  }
+
+  template<typename Archive>
+  void load_(
+    Archive& ar,const unsigned int version,const index_loader_type& lm,
+    ordered_unique_tag)
+  {
+    super::load_(ar,version,lm);
+  }
+
+  template<typename Archive>
+  void save_(
+    Archive& ar,const unsigned int version,const index_saver_type& sm,
+    ordered_non_unique_tag)const
+  {
+    typedef duplicates_iterator<node_type,value_compare> dup_iterator;
+
+    sm.save(
+      dup_iterator(begin().get_node(),end().get_node(),value_comp()),
+      dup_iterator(end().get_node(),value_comp()),
+      ar,version);
+    super::save_(ar,version,sm);
+  }
+
+  template<typename Archive>
+  void load_(
+    Archive& ar,const unsigned int version,const index_loader_type& lm,
+    ordered_non_unique_tag)
+  {
+    lm.load(
+      ::boost::bind(&ordered_index::rearranger,this,_1,_2),
+      ar,version);
+    super::load_(ar,version,lm);
+  }
+
+  void rearranger(node_type* position,node_type *x)
+  {
+    if(!position||comp_(key(position->value()),key(x->value()))){
+      position=lower_bound(key(x->value())).get_node();
+    }
+    else if(comp_(key(x->value()),key(position->value()))){
+      /* inconsistent rearrangement */
+      throw_exception(
+        archive::archive_exception(
+          archive::archive_exception::other_exception));
+    }
+    else node_type::increment(position);
+
+    if(position!=x){
+      node_impl_type::rebalance_for_erase(
+        x->impl(),header()->parent(),header()->left(),header()->right());
+      node_impl_type::restore(
+        x->impl(),position->impl(),header()->impl());
+    }
+  }
+#endif /* serialization */
+
+  key_from_value key;
+  key_compare    comp_;
+
+#if defined(BOOST_MULTI_INDEX_ENABLE_INVARIANT_CHECKING)&&\
+    BOOST_WORKAROUND(__MWERKS__,<=0x3003)
+#pragma parse_mfunc_templ reset
+#endif
+};
+
+/* comparison */
+
+template<
+  typename KeyFromValue1,typename Compare1,
+  typename SuperMeta1,typename TagList1,typename Category1,
+  typename KeyFromValue2,typename Compare2,
+  typename SuperMeta2,typename TagList2,typename Category2
+>
+bool operator==(
+  const ordered_index<KeyFromValue1,Compare1,SuperMeta1,TagList1,Category1>& x,
+  const ordered_index<KeyFromValue2,Compare2,SuperMeta2,TagList2,Category2>& y)
+{
+  return x.size()==y.size()&&std::equal(x.begin(),x.end(),y.begin());
+}
+
+template<
+  typename KeyFromValue1,typename Compare1,
+  typename SuperMeta1,typename TagList1,typename Category1,
+  typename KeyFromValue2,typename Compare2,
+  typename SuperMeta2,typename TagList2,typename Category2
+>
+bool operator<(
+  const ordered_index<KeyFromValue1,Compare1,SuperMeta1,TagList1,Category1>& x,
+  const ordered_index<KeyFromValue2,Compare2,SuperMeta2,TagList2,Category2>& y)
+{
+  return std::lexicographical_compare(x.begin(),x.end(),y.begin(),y.end());
+}
+
+template<
+  typename KeyFromValue1,typename Compare1,
+  typename SuperMeta1,typename TagList1,typename Category1,
+  typename KeyFromValue2,typename Compare2,
+  typename SuperMeta2,typename TagList2,typename Category2
+>
+bool operator!=(
+  const ordered_index<KeyFromValue1,Compare1,SuperMeta1,TagList1,Category1>& x,
+  const ordered_index<KeyFromValue2,Compare2,SuperMeta2,TagList2,Category2>& y)
+{
+  return !(x==y);
+}
+
+template<
+  typename KeyFromValue1,typename Compare1,
+  typename SuperMeta1,typename TagList1,typename Category1,
+  typename KeyFromValue2,typename Compare2,
+  typename SuperMeta2,typename TagList2,typename Category2
+>
+bool operator>(
+  const ordered_index<KeyFromValue1,Compare1,SuperMeta1,TagList1,Category1>& x,
+  const ordered_index<KeyFromValue2,Compare2,SuperMeta2,TagList2,Category2>& y)
+{
+  return y<x;
+}
+
+template<
+  typename KeyFromValue1,typename Compare1,
+  typename SuperMeta1,typename TagList1,typename Category1,
+  typename KeyFromValue2,typename Compare2,
+  typename SuperMeta2,typename TagList2,typename Category2
+>
+bool operator>=(
+  const ordered_index<KeyFromValue1,Compare1,SuperMeta1,TagList1,Category1>& x,
+  const ordered_index<KeyFromValue2,Compare2,SuperMeta2,TagList2,Category2>& y)
+{
+  return !(x<y);
+}
+
+template<
+  typename KeyFromValue1,typename Compare1,
+  typename SuperMeta1,typename TagList1,typename Category1,
+  typename KeyFromValue2,typename Compare2,
+  typename SuperMeta2,typename TagList2,typename Category2
+>
+bool operator<=(
+  const ordered_index<KeyFromValue1,Compare1,SuperMeta1,TagList1,Category1>& x,
+  const ordered_index<KeyFromValue2,Compare2,SuperMeta2,TagList2,Category2>& y)
+{
+  return !(x>y);
+}
+
+/*  specialized algorithms */
+
+template<
+  typename KeyFromValue,typename Compare,
+  typename SuperMeta,typename TagList,typename Category
+>
+void swap(
+  ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& x,
+  ordered_index<KeyFromValue,Compare,SuperMeta,TagList,Category>& y)
+{
+  x.swap(y);
+}
+
+} /* namespace multi_index::detail */
+
+/* ordered_index specifiers */
+
+template<typename Arg1,typename Arg2,typename Arg3>
+struct ordered_unique
+{
+  typedef typename detail::ordered_index_args<
+    Arg1,Arg2,Arg3>                                index_args;
+  typedef typename index_args::tag_list_type::type tag_list_type;
+  typedef typename index_args::key_from_value_type key_from_value_type;
+  typedef typename index_args::compare_type        compare_type;
+
+  template<typename Super>
+  struct node_class
+  {
+    typedef detail::ordered_index_node<Super> type;
+  };
+
+  template<typename SuperMeta>
+  struct index_class
+  {
+    typedef detail::ordered_index<
+      key_from_value_type,compare_type,
+      SuperMeta,tag_list_type,detail::ordered_unique_tag> type;
+  };
+};
+
+template<typename Arg1,typename Arg2,typename Arg3>
+struct ordered_non_unique
+{
+  typedef detail::ordered_index_args<
+    Arg1,Arg2,Arg3>                                index_args;
+  typedef typename index_args::tag_list_type::type tag_list_type;
+  typedef typename index_args::key_from_value_type key_from_value_type;
+  typedef typename index_args::compare_type        compare_type;
+
+  template<typename Super>
+  struct node_class
+  {
+    typedef detail::ordered_index_node<Super> type;
+  };
+
+  template<typename SuperMeta>
+  struct index_class
+  {
+    typedef detail::ordered_index<
+      key_from_value_type,compare_type,
+      SuperMeta,tag_list_type,detail::ordered_non_unique_tag> type;
+  };
+};
+
+} /* namespace multi_index */
+
+} /* namespace boost */
+
+/* Boost.Foreach compatibility */
+
+template<
+  typename KeyFromValue,typename Compare,
+  typename SuperMeta,typename TagList,typename Category
+>
+inline boost::mpl::true_* boost_foreach_is_noncopyable(
+  boost::multi_index::detail::ordered_index<
+    KeyFromValue,Compare,SuperMeta,TagList,Category>*&,
+  boost::foreach::tag)
+{
+  return 0;
+}
+
+#undef BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT
+#undef BOOST_MULTI_INDEX_ORD_INDEX_CHECK_INVARIANT_OF
+
+#endif
Index: /issm/trunk/externalpackages/boost/install-1.55-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.55-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/boost/install-1.55-linux-static.sh	(revision 24686)
@@ -0,0 +1,45 @@
+#!/bin/bash
+#set -eu # Do not run `set -eu` because it causes some targets to fail
+
+
+## Constants
+#
+VER="1_55_0"
+
+## Environment
+#
+export BOOST_ROOT="${ISSM_DIR}/externalpackages/boost"
+export CXXFLAGS='-std=c++98' # Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv boost_${VER}/* src/
+rm -rf boost_${VER}
+
+# Copy customized source and configuration files to 'src' directory
+cp configs/1.55/boost/multi_index/ordered_index.hpp src/boost/multi_index
+
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix=${BOOST_ROOT}/install \
+	--with-python=python2.7
+
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
+
+# Compile and install
+./bjam link=static install
+
+# Copy binary to install directory
+mkdir ${BOOST_ROOT}/install/bin
+cp bjam ${BOOST_ROOT}/install/bin
Index: /issm/trunk/externalpackages/boost/install-1.55-linux.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.55-linux.sh	(revision 24685)
+++ /issm/trunk/externalpackages/boost/install-1.55-linux.sh	(revision 24686)
@@ -1,40 +1,45 @@
 #!/bin/bash
-#set -eu
-#unhook set -eu because some target do fail and it is not a big deal
+#set -eu # Do not run `set -eu` because it causes some targets to fail
 
-#Note of caution:  stop after boostrap phase, and run 
-#bjam --debug-configuration, to figure out which paths boost is using to include 
-#python. make sure everyone of these paths is covered by python. If not, just make 
-#symlinks in externalpackages/python to what boost is expecting. Ther is NO WAY 
-#to get the boost library to include python support without doing that. 
 
-#Some cleanup
-rm -rf install boost_1_55_0 src
+## Constants
+#
+VER="1_55_0"
+
+## Environment
+#
+export BOOST_ROOT="${ISSM_DIR}/externalpackages/boost"
+export CXXFLAGS='-std=c++98' # Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
 mkdir install src
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/boost_1_55_0.tar.gz' 'boost_1_55_0.tar.gz'
+# Move source into 'src' directory
+mv boost_${VER}/* src/
+rm -rf boost_${VER}
 
-#Untar 
-tar -zxvf  boost_1_55_0.tar.gz
+# Copy customized source and configuration files to 'src' directory
+cp configs/1.55/boost/multi_index/ordered_index.hpp src/boost/multi_index
 
-#Move boost into install directory
-mv boost_1_55_0/* src
-rm -rf boost_1_55_0
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix=${BOOST_ROOT}/install \
+	--with-python=python2.7
 
-#Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
-export CXXFLAGS='-std=c++98'
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
 
-#Configure and compile
-cd src 
-./bootstrap.sh \
-	--prefix="$ISSM_DIR/externalpackages/boost/install" \
-	--with-python=python2.7 \
-	--with-python-root="$ISSM_DIR/externalpackages/python/install" 
-
-#Compile boost
+# Compile and install
 ./bjam install
 
-#put bjam into install also: 
-mkdir ../install/bin
-cp bjam ../install/bin
+# Copy binary to install directory
+mkdir ${BOOST_ROOT}/install/bin
+cp bjam ${BOOST_ROOT}/install/bin
Index: sm/trunk/externalpackages/boost/install-1.55-linux64-static.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.55-linux64-static.sh	(revision 24685)
+++ 	(revision )
@@ -1,39 +1,0 @@
-#!/bin/bash
-#set -eu
-#unhook set -eu because some target do fail and it is not a big deal
-
-#Note of caution:  stop after boostrap phase, and run
-#bjam --debug-configuration, to figure out which paths boost is using to include
-#python. make sure everyone of these paths is covered by python. If not, just make
-#symlinks in externalpackages/python to what boost is expecting. Ther is NO WAY
-#to get the boost library to include python support without doing that.
-
-#Some cleanup
-rm -rf install boost_1_55_0 src
-mkdir install src
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh https://issm.ess.uci.edu/files/externalpackages/boost_1_55_0.tar.gz boost_1_55_0.tar.gz
-
-#Untar
-tar -zxvf boost_1_55_0.tar.gz
-
-#Move boost into install directory
-mv boost_1_55_0/* src
-rm -rf boost_1_55_0
-
-#Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
-export CXXFLAGS='-std=c++98'
-
-#Configure and compile
-cd src
-./bootstrap.sh \
-	--prefix="$ISSM_DIR/externalpackages/boost/install" \
-	--with-python=python
-
-#Compile boost
-./bjam toolset=gcc link=static install
-
-#put bjam into install also
-mkdir ../install/bin
-cp bjam ../install/bin
Index: /issm/trunk/externalpackages/boost/install-1.55-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.55-mac-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/boost/install-1.55-mac-static.sh	(revision 24686)
@@ -0,0 +1,45 @@
+#!/bin/bash
+#set -eu # Do not run `set -eu` because it causes some targets to fail
+
+
+## Constants
+#
+VER="1_55_0"
+
+## Environment
+#
+export BOOST_ROOT="${ISSM_DIR}/externalpackages/boost"
+export CXXFLAGS='-std=c++98' # Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv boost_${VER}/* src/
+rm -rf boost_${VER}
+
+# Copy customized source and configuration files to 'src' directory
+cp configs/1.55/boost/multi_index/ordered_index.hpp src/boost/multi_index
+
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix=${BOOST_ROOT}/install \
+	--with-python=python2.7
+
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
+
+# Compile and install
+./bjam toolset=darwin link=static install
+
+# Copy binary to install directory
+mkdir ${BOOST_ROOT}/install/bin
+cp bjam ${BOOST_ROOT}/install/bin
Index: /issm/trunk/externalpackages/boost/install-1.55-mac.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.55-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/boost/install-1.55-mac.sh	(revision 24686)
@@ -0,0 +1,57 @@
+#!/bin/bash
+#set -eu # Do not run `set -eu` because it causes some targets to fail
+
+
+## Constants
+#
+VER="1_55_0"
+
+## Environment
+#
+export BOOST_ROOT="${ISSM_DIR}/externalpackages/boost"
+export CXXFLAGS='-std=c++98' # Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv boost_${VER}/* src/
+rm -rf boost_${VER}
+
+# Copy customized source and configuration files to 'src' directory
+cp configs/1.55/boost/multi_index/ordered_index.hpp src/boost/multi_index
+
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix=${BOOST_ROOT}/install \
+	--with-python=python2.7
+
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
+
+# Compile and install
+./bjam toolset=darwin install
+
+# Copy binary to install directory
+mkdir ${BOOST_ROOT}/install/bin
+cp bjam ${BOOST_ROOT}/install/bin
+
+# Set install_name for all shared libraries
+cd ${BOOST_ROOT}/install/lib
+for name in *.dylib; do
+	install_name_tool -id ${BOOST_ROOT}/install/lib/${name} ${name}
+done
+
+## Patch install names for certain libraries
+#
+# TODO: Figure out how to reconfigure source to apply these install names at compile time
+#
+install_name_tool -change libboost_system.dylib ${BOOST_ROOT}/install/lib/libboost_system.dylib libboost_filesystem.dylib
Index: /issm/trunk/externalpackages/boost/install-1.72-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.72-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/boost/install-1.72-linux-static.sh	(revision 24686)
@@ -0,0 +1,48 @@
+#!/bin/bash
+#set -eu # Do not run `set -eu` because it causes some targets to fail
+
+
+# NOTE:
+# - Stop after bootstrap step and run `b2 --debug-configuration` to figure
+#	out which paths Boost is using to include Python. Make sure that each of
+#	the listed paths is covered by Python. If not, you must create a symbolic
+#	link from $ISSM_DIR/externalpackages/python to the location of the file
+#	that Boost is expecting. There is no way to get Boost to compile with
+#	Python otherwise.
+#
+
+## Constants
+#
+VER="1_72_0"
+
+## Environment
+#
+#export CXXFLAGS='-std=c++98' # Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
+#export CXXFLAGS='-std=c++11'
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv boost_${VER}/* src/
+rm -rf boost_${VER}
+
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix="${ISSM_DIR}/externalpackages/boost/install" \
+	--with-python=python2.7 \
+	--with-python-root="${ISSM_DIR}/externalpackages/python/install"
+
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
+
+# Compile and install
+./b2 link=static install
Index: /issm/trunk/externalpackages/boost/install-1.72-linux.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.72-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/boost/install-1.72-linux.sh	(revision 24686)
@@ -0,0 +1,48 @@
+#!/bin/bash
+#set -eu # Do not run `set -eu` because it causes some targets to fail
+
+
+# NOTE:
+# - Stop after bootstrap step and run `b2 --debug-configuration` to figure
+#	out which paths Boost is using to include Python. Make sure that each of
+#	the listed paths is covered by Python. If not, you must create a symbolic
+#	link from $ISSM_DIR/externalpackages/python to the location of the file
+#	that Boost is expecting. There is no way to get Boost to compile with
+#	Python otherwise.
+#
+
+## Constants
+#
+VER="1_72_0"
+
+## Environment
+#
+#export CXXFLAGS='-std=c++98' # Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
+#export CXXFLAGS='-std=c++11'
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv boost_${VER}/* src/
+rm -rf boost_${VER}
+
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix="${ISSM_DIR}/externalpackages/boost/install" \
+	--with-python=python2.7 \
+	--with-python-root="${ISSM_DIR}/externalpackages/python/install"
+
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
+
+# Compile and install
+./b2 --toolset=gcc install
Index: /issm/trunk/externalpackages/boost/install-1.72-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.72-mac-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/boost/install-1.72-mac-static.sh	(revision 24686)
@@ -0,0 +1,47 @@
+#!/bin/bash
+#set -eu # Do not run `set -eu` because it causes some targets to fail
+
+
+## Constants
+#
+VER="1_72_0"
+
+## Environment
+#
+export BOOST_ROOT="${ISSM_DIR}/externalpackages/boost"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv boost_${VER}/* src/
+rm -rf boost_${VER}
+
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix=${BOOST_ROOT}/install \
+	--with-python=python2.7
+
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
+
+# Compile and install
+./b2 toolset=darwin link=static install
+
+# Copy binary to install directory
+mkdir ${BOOST_ROOT}/install/bin
+cp bjam ${BOOST_ROOT}/install/bin
+
+# Remove any dynamic libraries that may have been compiled
+#
+# TODO: Reconfigure so that dynamic libraries are not compiled at all
+#
+rm -f $(ls ${BOOST_ROOT}/install/lib/*.dylib)
Index: /issm/trunk/externalpackages/boost/install-1.72-mac.sh
===================================================================
--- /issm/trunk/externalpackages/boost/install-1.72-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/boost/install-1.72-mac.sh	(revision 24686)
@@ -0,0 +1,47 @@
+#!/bin/bash
+#set -eu # Do not run `set -eu` because it causes some targets to fail
+
+
+## Constants
+#
+VER="1_72_0"
+
+## Environment
+#
+export BOOST_ROOT="${ISSM_DIR}/externalpackages/boost"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/boost_${VER}.tar.gz" "boost_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf boost_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv boost_${VER}/* src/
+rm -rf boost_${VER}
+
+# Configure
+cd src
+./bootstrap.sh \
+	--prefix=${BOOST_ROOT}/install \
+	--with-python=python2.7
+
+# Modify project config to enable MPI
+printf "\n# Enable MPI\nusing mpi ;\n" >> project-config.jam
+
+# Compile and install
+./b2 toolset=darwin install
+
+# Copy binary to install directory
+mkdir ${BOOST_ROOT}/install/bin
+cp bjam ${BOOST_ROOT}/install/bin
+
+# Set install_name for all shared libraries
+cd ${BOOST_ROOT}/install/lib
+for name in *.dylib; do
+	install_name_tool -id ${BOOST_ROOT}/install/lib/${name} ${name}
+done
Index: /issm/trunk/externalpackages/cmake/install.sh
===================================================================
--- /issm/trunk/externalpackages/cmake/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/cmake/install.sh	(revision 24686)
@@ -1,27 +1,33 @@
 #!/bin/bash
-set -eu 
-VER="3.6.2"
+set -eu
 
-#Some cleanup
-rm -rf install cmake-$VER
-mkdir install
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/cmake-$VER.tar.gz" "cmake-$VER.tar.gz"
+## Constants
+#
+VER="3.16.2"
 
-#Untar 
-tar -zxvf  cmake-$VER.tar.gz
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/cmake-${VER}.tar.gz" "cmake-${VER}.tar.gz"
 
-#Move cmake into install directory
-mv cmake-$VER/* install
-rm -rf cmake-$VER
+# Unpack source
+tar -zxvf cmake-${VER}.tar.gz
 
-#Compile cmake
-cd install 
-./bootstrap --prefix=$ISSM_DIR/externalpackages/cmake/install
+# Cleanup
+rm -rf install
+
+# Move source into 'install' directory
+mv cmake-${VER} install
+
+# Configure
+cd install
+#./bootstrap \
+#	--prefix=${ISSM_DIR}/externalpackages/cmake/install # Breaks on ronne
+./configure \
+	--prefix=${ISSM_DIR}/externalpackages/cmake/install
+
+# Compile
 if [ $# -eq 0 ]; then
 	make
-else 
-	make -j $1; 
+else
+	make -j $1;
 fi
-make install
Index: /issm/trunk/externalpackages/codipack/install.sh
===================================================================
--- /issm/trunk/externalpackages/codipack/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/codipack/install.sh	(revision 24686)
@@ -2,7 +2,19 @@
 set -eu
 
-#Some cleanup
-rm -rf install 
+VER=1.8
+
+# Cleanup from previous installation
+rm -rf install CoDiPack-$VER.tar.gz
 
 #Download development version
 svn co https://github.com/SciCompKL/CoDiPack.git/trunk install
+
+## Download source
+#$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/CoDiPack-${VER}.tar.gz" "CoDiPack-${VER}.tar.gz"
+#
+## Untar
+#tar -zxvf CoDiPack-$VER.tar.gz
+#
+## Move source into install directory
+#mv CoDiPack-$VER install
+#rm -rf CoDiPack-$VER/
Index: /issm/trunk/externalpackages/curl/install-7.67-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/curl/install-7.67-with_tests.sh	(revision 24686)
+++ /issm/trunk/externalpackages/curl/install-7.67-with_tests.sh	(revision 24686)
@@ -0,0 +1,41 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="7.67.0"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/curl-${VER}.tar.gz" "curl-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf curl-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv curl-$VER/* src
+rm -rf curl-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/curl/install" \
+	--disable-manual
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make test
+	make install
+else
+	make -j $1
+	make -j $1 test
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ../..
Index: /issm/trunk/externalpackages/curl/install-7.67.sh
===================================================================
--- /issm/trunk/externalpackages/curl/install-7.67.sh	(revision 24686)
+++ /issm/trunk/externalpackages/curl/install-7.67.sh	(revision 24686)
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="7.67.0"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/curl-${VER}.tar.gz" "curl-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf curl-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv curl-$VER/* src
+rm -rf curl-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/curl/install" \
+	--disable-manual \
+	--disable-verbose
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ../..
Index: /issm/trunk/externalpackages/dakota/configs/6.2/CMakeLists.txt.libvars.patch
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/CMakeLists.txt.libvars.patch	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/CMakeLists.txt.libvars.patch	(revision 24686)
@@ -0,0 +1,6 @@
+156c153,155
+< # TODO: Can't this be integrated into the following logic?
+---
+> # TODO: Can't this be integrated into the following logic?
+> set(BLAS_LIBS $ENV{BLAS_LIBS})
+> set(LAPACK_LIBS $ENV{LAPACK_LIBS})
Index: /issm/trunk/externalpackages/dakota/configs/6.2/linux/cmake/BuildDakotaCustom.cmake
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/linux/cmake/BuildDakotaCustom.cmake	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/linux/cmake/BuildDakotaCustom.cmake	(revision 24686)
@@ -0,0 +1,95 @@
+##############################################################################
+#
+# Template CMake Configuration File.
+#
+##############################################################################
+# The following CMake variables represent the minimum set of variables
+# that are required to allow Dakota to
+#   * find all prerequisite third party libraries (TPLs)
+#   * configure compiler and MPI options
+#   * set Dakota install path
+#
+# Instructions:
+# 1. Read Dakota/INSTALL - Source Quick Start to use this template file.
+#
+# 2. Uncomment CMake variables below ONLY for values you need to change for
+#    your platform. Edit variables as needed.
+#
+#    For example, if you are using a custom install of Boost, installed in
+#    /home/me/usr/boost, uncomment both CMake Boost variables  and edit
+#    paths:
+#       set(BOOST_ROOT
+#           "/home/me/usr/boost"
+#           CACHE PATH "Use non-standard Boost install" FORCE)
+#       set( Boost_NO_SYSTEM_PATHS TRUE
+#            CACHE BOOL "Suppress search paths other than BOOST_ROOT" FORCE)
+#
+#    Save file and exit.
+#
+# 6. Run CMake with script file. At terminal window, type:
+#      $ cmake -C BuildCustom.cmake $DAK_SRC
+#
+#    If you have not followed instructions in INSTALL -Source Quick Start,
+#    you will need to replace BuildCustom.cmake with the actual filename of
+#    this file and $DAK_SRC with the actual path to Dakota source.
+#
+##############################################################################
+
+##############################################################################
+# Set BLAS, LAPACK library paths ONLY if in non-standard locations
+##############################################################################
+set( BLAS_LIBS
+      "$ENV{BLAS_LIBS}"
+      CACHE FILEPATH "Use non-standard BLAS library path" FORCE )
+set( LAPACK_LIBS
+      "$ENV{LAPACK_LIBS}"
+      CACHE FILEPATH "Use non-standard BLAS library path" FORCE )
+
+##############################################################################
+# Set additional compiler options
+# Uncomment and replace <flag> with actual compiler flag, e.g. -xxe4.2
+##############################################################################
+set( CMAKE_C_FLAGS "${CMAKE_C_FLAGS}"
+     CACHE STRING "C Flags my platform" )
+set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}"
+     CACHE STRING "CXX Flags for my platform" )
+set( CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS}"
+     CACHE STRING "Fortran Flags for my platform" )
+
+##############################################################################
+# Set MPI options
+# Recommended practice is to set DAKOTA_HAVE_MPI and set MPI_CXX_COMPILER
+# to a compiler wrapper.
+##############################################################################
+set( DAKOTA_HAVE_MPI ON
+     CACHE BOOL "Build with MPI enabled" FORCE)
+set( MPI_INCLUDE_PATH "$ENV{MPI_INSTALL}/include"
+     CACHE FILEPATH "Use MPI headers" FORCE)
+set( MPI_LIBRARY "-L$ENV{MPI_INSTALL}/lib -lmpich"
+     CACHE FILEPATH "Use MPI library" FORCE)
+
+##############################################################################
+# Set Boost path if CMake cannot find your installed version of Boost or
+# if you have a custom Boost install location.
+##############################################################################
+set(BOOST_ROOT
+    $ENV{BOOST_ROOT}
+    CACHE PATH "Use non-standard Boost install" FORCE)
+set( Boost_NO_SYSTEM_PATHS TRUE
+     CACHE BOOL "Supress search paths other than BOOST_ROOT" FORCE)
+
+##############################################################################
+# Set Trilinos path if you have a custom Trilinos install location. If
+# not set, the Trilinos package, teuchos, will be built during the Dakota
+# build.
+##############################################################################
+#set( Trilinos_DIR
+#      "path/to/Trilinos/install"
+#      CACHE PATH "Path to installed Trilinos" FORCE )
+
+##############################################################################
+# Customize DAKOTA
+##############################################################################
+set( CMAKE_INSTALL_PREFIX
+     $ENV{DAK_INSTALL}
+     CACHE PATH "Path to Dakota installation" )
Index: /issm/trunk/externalpackages/dakota/configs/6.2/linux/cmake/DakotaDev.cmake
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/linux/cmake/DakotaDev.cmake	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/linux/cmake/DakotaDev.cmake	(revision 24686)
@@ -0,0 +1,18 @@
+# CMake options for DAKOTA developer builds
+
+# Developer options
+#set(DAKOTA_HAVE_MPI TRUE CACHE BOOL "Enable MPI in DAKOTA?")
+set(ENABLE_DAKOTA_DOCS FALSE CACHE BOOL "Enable DAKOTA documentation build")
+set(ENABLE_SPEC_MAINT FALSE CACHE BOOL
+  "Enable DAKOTA specification maintenance mode?")
+set(PECOS_ENABLE_TESTS FALSE CACHE BOOL "Enable Pecos-specific tests?")
+
+# Not included from Mike's configs, but may help some
+
+# Disable optional X graphics
+#-DHAVE_X_GRAPHICS:BOOL=FALSE
+set(HAVE_X_GRAPHICS OFF CACHE BOOL "Disable dependency on X libraries" FORCE)
+
+# CMake 2.8.6 has problems with RHEL6/Boost -- the following is a workaround
+#-DBoost_NO_BOOST_CMAKE=ON
+#set(Boost_NO_BOOST_CMAKE ON CACHE BOOL "Obtain desired behavior on RHEL6" FORCE)
Index: /issm/trunk/externalpackages/dakota/configs/6.2/linux/packages/pecos/src/pecos_global_defs.hpp
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/linux/packages/pecos/src/pecos_global_defs.hpp	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/linux/packages/pecos/src/pecos_global_defs.hpp	(revision 24686)
@@ -0,0 +1,190 @@
+/*  _______________________________________________________________________
+
+    PECOS: Parallel Environment for Creation Of Stochastics
+    Copyright (c) 2011, Sandia National Laboratories.
+    This software is distributed under the GNU Lesser General Public License.
+    For more information, see the README file in the top Pecos directory.
+    _______________________________________________________________________ */
+
+#ifndef PECOS_GLOBAL_DEFS_H
+#define PECOS_GLOBAL_DEFS_H
+
+#include <iostream>
+#include <cfloat>  // for DBL_MIN, DBL_MAX
+#include <cmath>
+#include <cstdlib>
+
+#include <boost/math/constants/constants.hpp>
+
+namespace Pecos {
+
+// --------------
+// Special values
+// --------------
+/// the value for PI used in various numerical routines
+#ifndef PI
+const double PI = boost::math::constants::pi<double>();
+#endif
+
+/// special value returned by index() when entry not found
+const size_t _NPOS = ~(size_t)0; // one's complement
+
+/// used in ostream data output functions
+const int WRITE_PRECISION = 10;
+
+/// small value used for protecting division by zero, etc.; an alternative
+/// to DBL_MIN that is less likely to cause underflow/overflow when numbers
+/// larger than it are used in calculations
+const double SMALL_NUMBER = 1.e-25;
+/// large value used as a surrogate for infinity in error traps; an alternative
+/// to DBL_MAX or inf that is less likely to cause underflow/overflow when used
+/// in subsequent calculations
+const double LARGE_NUMBER = 1.e+50;
+
+// define special values for vector/matrix data copying modes
+enum { DEFAULT_COPY=0, SHALLOW_COPY, DEEP_COPY };
+
+// define special values for ExpansionConfigOptions::outputLevel
+enum { SILENT_OUTPUT, QUIET_OUTPUT, NORMAL_OUTPUT, VERBOSE_OUTPUT,
+       DEBUG_OUTPUT };
+
+// define special values for ranVarTypesX/U
+enum { NO_TYPE=0, CONTINUOUS_DESIGN, STD_NORMAL, NORMAL, BOUNDED_NORMAL,
+       LOGNORMAL, BOUNDED_LOGNORMAL, STD_UNIFORM, UNIFORM, LOGUNIFORM,
+       TRIANGULAR, STD_EXPONENTIAL, EXPONENTIAL, STD_BETA, BETA, STD_GAMMA,
+       GAMMA, GUMBEL, FRECHET, WEIBULL, HISTOGRAM_BIN, CONTINUOUS_INTERVAL,
+       CONTINUOUS_STATE, STOCHASTIC_EXPANSION };
+
+// define special values for secondaryACVarMapTargets/secondaryADVarMapTargets
+enum { NO_TARGET=0, CDV_LWR_BND, CDV_UPR_BND, DDRIV_LWR_BND, DDRIV_UPR_BND,
+       N_MEAN, N_STD_DEV, N_LWR_BND, N_UPR_BND, N_LOCATION, N_SCALE, LN_MEAN,
+       LN_STD_DEV, LN_LAMBDA, LN_ZETA, LN_ERR_FACT, LN_LWR_BND, LN_UPR_BND,
+       U_LWR_BND, U_UPR_BND, U_LOCATION, U_SCALE, LU_LWR_BND, LU_UPR_BND,
+       T_MODE, T_LWR_BND, T_UPR_BND, T_LOCATION, T_SCALE, E_BETA,
+       BE_ALPHA, BE_BETA, BE_LWR_BND, BE_UPR_BND, GA_ALPHA, GA_BETA,
+       GU_ALPHA, GU_BETA, F_ALPHA, F_BETA, W_ALPHA, W_BETA,
+       P_LAMBDA, BI_P_PER_TRIAL, BI_TRIALS, NBI_P_PER_TRIAL, NBI_TRIALS,
+       GE_P_PER_TRIAL, HGE_TOT_POP, HGE_SEL_POP, HGE_FAILED,
+       CSV_LWR_BND, CSV_UPR_BND, DSRIV_LWR_BND, DSRIV_UPR_BND };
+
+/// derived basis approximation types
+enum { NO_BASIS=0, //FOURIER_BASIS, EIGEN_BASIS,
+       GLOBAL_NODAL_INTERPOLATION_POLYNOMIAL,
+       PIECEWISE_NODAL_INTERPOLATION_POLYNOMIAL,
+       GLOBAL_HIERARCHICAL_INTERPOLATION_POLYNOMIAL,
+       PIECEWISE_HIERARCHICAL_INTERPOLATION_POLYNOMIAL,
+       GLOBAL_REGRESSION_ORTHOGONAL_POLYNOMIAL,
+       GLOBAL_PROJECTION_ORTHOGONAL_POLYNOMIAL,
+       GLOBAL_ORTHOGONAL_POLYNOMIAL };
+       //PIECEWISE_REGRESSION_ORTHOGONAL_POLYNOMIAL,
+       //PIECEWISE_PROJECTION_ORTHOGONAL_POLYNOMIAL,
+       //PIECEWISE_ORTHOGONAL_POLYNOMIAL };
+
+/// derived basis polynomial types (orthogonal polynomial order follows
+/// uncertain variable spec order of normal, uniform, exponential, beta, gamma)
+enum { NO_POLY=0, HERMITE_ORTHOG, LEGENDRE_ORTHOG, LAGUERRE_ORTHOG,
+       JACOBI_ORTHOG, GEN_LAGUERRE_ORTHOG, CHEBYSHEV_ORTHOG, NUM_GEN_ORTHOG,
+       LAGRANGE_INTERP, HERMITE_INTERP, PIECEWISE_LINEAR_INTERP,
+       PIECEWISE_QUADRATIC_INTERP, PIECEWISE_CUBIC_INTERP };
+
+/// integration rules within VPISparseGrid (1-12: CC through User-closed)
+/// and beyond (GOLUB_WELSCH, NEWTON_COTES)
+enum { NO_RULE=0, CLENSHAW_CURTIS, FEJER2, GAUSS_PATTERSON, GAUSS_LEGENDRE,
+       GAUSS_HERMITE, GEN_GAUSS_HERMITE, GAUSS_LAGUERRE, GEN_GAUSS_LAGUERRE,
+       GAUSS_JACOBI, GENZ_KEISTER, /*USER_OPEN, USER_CLOSED,*/ GOLUB_WELSCH,
+       NEWTON_COTES };
+
+// growth rules within VPISparseGrid
+//enum { DEFAULT_GROWTH=0, SLOW_LINEAR, SLOW_LINEAR_ODD, MODERATE_LINEAR,
+//       SLOW_EXPONENTIAL, MODERATE_EXPONENTIAL, FULL_EXPONENTIAL };
+
+/// options for synchronizing linear and exponential growth rule settings
+/// (consistent with slow/moderate/full growth for new level_to_growth_*
+/// functions in sandia_rules.cpp)
+enum { SLOW_RESTRICTED_GROWTH, MODERATE_RESTRICTED_GROWTH,
+       UNRESTRICTED_GROWTH };
+
+/// solution approaches for calculating the polynomial basis coefficients
+/// (options for ExpansionConfigOptions::expCoeffsSolnApproach)
+enum { QUADRATURE, CUBATURE, LIGHTWEIGHT_SPARSE_GRID, COMBINED_SPARSE_GRID,
+       HIERARCHICAL_SPARSE_GRID, SAMPLING, DEFAULT_REGRESSION,
+       DEFAULT_LEAST_SQ_REGRESSION, SVD_LEAST_SQ_REGRESSION,
+       EQ_CON_LEAST_SQ_REGRESSION, BASIS_PURSUIT, BASIS_PURSUIT_DENOISING,
+       ORTHOG_MATCH_PURSUIT, LASSO_REGRESSION, LEAST_ANGLE_REGRESSION,
+       ORTHOG_LEAST_INTERPOLATION };
+/// options for BasisConfigOptions::nestingOverride (inactive)
+enum { NO_NESTING_OVERRIDE=0, NESTED, NON_NESTED };
+/// options for overriding the default growth restriction policy
+enum { NO_GROWTH_OVERRIDE=0, RESTRICTED, UNRESTRICTED };
+/// options for ExpansionConfigOptions::refinementType (inactive)
+enum { NO_REFINEMENT=0, P_REFINEMENT, H_REFINEMENT };
+/// options for ExpansionConfigOptions::refinementControl
+enum { NO_CONTROL=0, UNIFORM_CONTROL, LOCAL_ADAPTIVE_CONTROL,
+       DIMENSION_ADAPTIVE_CONTROL_SOBOL, DIMENSION_ADAPTIVE_CONTROL_DECAY,
+       DIMENSION_ADAPTIVE_CONTROL_GENERALIZED };
+
+/// options for expansion basis type
+enum { DEFAULT_BASIS=0, TENSOR_PRODUCT_BASIS, TOTAL_ORDER_BASIS,
+       ADAPTED_BASIS_GENERALIZED, ADAPTED_BASIS_EXPANDING_FRONT,
+       NODAL_INTERPOLANT, HIERARCHICAL_INTERPOLANT };
+
+/// mode of integration driver: integration versus interpolation
+enum { DEFAULT_MODE=0, INTEGRATION_MODE, INTERPOLATION_MODE };
+
+/// options for local basis functions within PiecewiseInterpPolynomial
+enum { LINEAR_EQUIDISTANT, LINEAR, QUADRATIC_EQUIDISTANT, QUADRATIC,
+       CUBIC_EQUIDISTANT, CUBIC };
+
+/// special values for nodal interpolation of variance and variance gradient
+enum { INTERPOLATION_OF_PRODUCTS, REINTERPOLATION_OF_PRODUCTS,
+       PRODUCT_OF_INTERPOLANTS_FAST, PRODUCT_OF_INTERPOLANTS_FULL };
+
+/// special values for polynomial expansion combination
+enum { NO_COMBINE=0,  ADD_COMBINE, MULT_COMBINE, ADD_MULT_COMBINE };
+
+
+// ----------------
+// Standard streams
+// ----------------
+#define PCout std::cout
+#define PCerr std::cerr
+
+
+// --------------
+// Global objects
+// --------------
+/// Dummy struct for overloading letter-envelope constructors.
+/** BaseConstructor is used to overload the constructor for the base class
+    portion of letter objects.  It avoids infinite recursion (Coplien p.139)
+    in the letter-envelope idiom by preventing the letter from instantiating
+    another envelope.  Putting this struct here avoids circular dependencies. */
+struct BaseConstructor {
+  BaseConstructor(int = 0) {} ///< C++ structs can have constructors
+};
+
+
+// ----------------
+// Global functions
+// ----------------
+
+/// global function which handles serial or parallel aborts
+void abort_handler(int code);
+
+
+inline void abort_handler(int code)
+{ std::exit(code); } // for now, prior to use of MPI
+
+
+/** Templatized abort_handler_t method that allows for convenient return from
+    methods that otherwise have no sensible return from error clauses.  Usage:
+    MyType& method() { return abort_handler<MyType&>(-1); } */
+template <typename T>
+T abort_handler_t(int code)
+{
+  abort_handler(code);
+  throw code;
+}
+
+} // namespace Pecos
+
+#endif // PECOS_GLOBAL_DEFS_H
Index: /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/BuildDakotaCustom.cmake
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/BuildDakotaCustom.cmake	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/BuildDakotaCustom.cmake	(revision 24686)
@@ -0,0 +1,95 @@
+##############################################################################
+#
+# Template CMake Configuration File.
+#
+##############################################################################
+# The following CMake variables represent the minimum set of variables
+# that are required to allow Dakota to
+#   * find all prerequisite third party libraries (TPLs)
+#   * configure compiler and MPI options
+#   * set Dakota install path
+#
+# Instructions:
+# 1. Read Dakota/INSTALL - Source Quick Start to use this template file.
+#
+# 2. Uncomment CMake variables below ONLY for values you need to change for
+#    your platform. Edit variables as needed.
+#
+#    For example, if you are using a custom install of Boost, installed in
+#    /home/me/usr/boost, uncomment both CMake Boost variables  and edit
+#    paths:
+#       set(BOOST_ROOT
+#           "/home/me/usr/boost"
+#           CACHE PATH "Use non-standard Boost install" FORCE)
+#       set( Boost_NO_SYSTEM_PATHS TRUE
+#            CACHE BOOL "Suppress search paths other than BOOST_ROOT" FORCE)
+#
+#    Save file and exit.
+#
+# 6. Run CMake with script file. At terminal window, type:
+#      $ cmake -C BuildCustom.cmake $DAK_SRC
+#
+#    If you have not followed instructions in INSTALL -Source Quick Start,
+#    you will need to replace BuildCustom.cmake with the actual filename of
+#    this file and $DAK_SRC with the actual path to Dakota source.
+#
+##############################################################################
+
+##############################################################################
+# Set BLAS, LAPACK library paths ONLY if in non-standard locations
+##############################################################################
+set( BLAS_LIBS
+      "$ENV{BLAS_LIBS}"
+      CACHE FILEPATH "Use non-standard BLAS library path" FORCE )
+set( LAPACK_LIBS
+      "$ENV{LAPACK_LIBS}"
+      CACHE FILEPATH "Use non-standard BLAS library path" FORCE )
+
+##############################################################################
+# Set additional compiler options
+# Uncomment and replace <flag> with actual compiler flag, e.g. -xxe4.2
+##############################################################################
+set( CMAKE_C_FLAGS "${CMAKE_C_FLAGS}"
+     CACHE STRING "C Flags my platform" )
+set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}"
+     CACHE STRING "CXX Flags for my platform" )
+set( CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS}"
+     CACHE STRING "Fortran Flags for my platform" )
+
+##############################################################################
+# Set MPI options
+# Recommended practice is to set DAKOTA_HAVE_MPI and set MPI_CXX_COMPILER
+# to a compiler wrapper.
+##############################################################################
+set( DAKOTA_HAVE_MPI ON
+     CACHE BOOL "Build with MPI enabled" FORCE)
+set( MPI_INCLUDE_PATH "$ENV{MPI_INSTALL}/include"
+     CACHE FILEPATH "Use MPI headers" FORCE)
+set( MPI_LIBRARY "-L$ENV{MPI_INSTALL}/lib -lmpich"
+     CACHE FILEPATH "Use MPI library" FORCE)
+
+##############################################################################
+# Set Boost path if CMake cannot find your installed version of Boost or
+# if you have a custom Boost install location.
+##############################################################################
+set(BOOST_ROOT
+    $ENV{BOOST_ROOT}
+    CACHE PATH "Use non-standard Boost install" FORCE)
+set( Boost_NO_SYSTEM_PATHS TRUE
+     CACHE BOOL "Supress search paths other than BOOST_ROOT" FORCE)
+
+##############################################################################
+# Set Trilinos path if you have a custom Trilinos install location. If
+# not set, the Trilinos package, teuchos, will be built during the Dakota
+# build.
+##############################################################################
+#set( Trilinos_DIR
+#      "path/to/Trilinos/install"
+#      CACHE PATH "Path to installed Trilinos" FORCE )
+
+##############################################################################
+# Customize DAKOTA
+##############################################################################
+set( CMAKE_INSTALL_PREFIX
+     $ENV{DAK_INSTALL}
+     CACHE PATH "Path to Dakota installation" )
Index: /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/DakotaDev.cmake
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/DakotaDev.cmake	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/DakotaDev.cmake	(revision 24686)
@@ -0,0 +1,18 @@
+# CMake options for DAKOTA developer builds
+
+# Developer options
+#set(DAKOTA_HAVE_MPI TRUE CACHE BOOL "Enable MPI in DAKOTA?")
+set(ENABLE_DAKOTA_DOCS FALSE CACHE BOOL "Enable DAKOTA documentation build")
+set(ENABLE_SPEC_MAINT FALSE CACHE BOOL
+  "Enable DAKOTA specification maintenance mode?")
+set(PECOS_ENABLE_TESTS FALSE CACHE BOOL "Enable Pecos-specific tests?")
+
+# Not included from Mike's configs, but may help some
+
+# Disable optional X graphics
+#-DHAVE_X_GRAPHICS:BOOL=FALSE
+set(HAVE_X_GRAPHICS OFF CACHE BOOL "Disable dependency on X libraries" FORCE)
+
+# CMake 2.8.6 has problems with RHEL6/Boost -- the following is a workaround
+#-DBoost_NO_BOOST_CMAKE=ON
+#set(Boost_NO_BOOST_CMAKE ON CACHE BOOL "Obtain desired behavior on RHEL6" FORCE)
Index: /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/InstallDarwinDylibs.cmake
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/InstallDarwinDylibs.cmake	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/mac/cmake/InstallDarwinDylibs.cmake	(revision 24686)
@@ -0,0 +1,132 @@
+# Find the Darwin dylib dependencies of dakota, excluding system libraries,
+# and install to ${CMAKE_INSTALL_PREFIX}/bin
+
+# NOTE: This script will only work for make install from top of build tree
+# TODO: Review string quoting conventions and test with spaces in filename
+
+# Function to install a single Dakota dll dependency
+# (used by multiple platforms)
+function(dakota_install_dll dakota_dll)
+  if (EXISTS "${dakota_dll}")
+    get_filename_component(dll_filename "${dakota_dll}" NAME)
+    message("-- Installing: ${CMAKE_INSTALL_PREFIX}/bin/${dll_filename}")
+    execute_process(
+      COMMAND
+        ${CMAKE_COMMAND} -E copy "${dakota_dll}" "${CMAKE_INSTALL_PREFIX}/bin"
+      )
+  else()
+    message(WARNING "Install couldn't find dynamic dependency ${dakota_dll}")
+  endif()
+endfunction()
+
+if ( DEFINED ENV{DAK_BUILD} )
+  set ( CMAKE_CURRENT_BINARY_DIR $ENV{DAK_BUILD} )
+elseif ( DAKOTA_JENKINS_BUILD OR DEFINED ENV{WORKSPACE} )
+  # By convention, all Dakota, jenkins-driven build jobs use a 'build'
+  # subdir for clear separation of source and build trees in the WORKSPACE
+  set( CMAKE_CURRENT_BINARY_DIR $ENV{WORKSPACE}/build )
+elseif ( NOT CMAKE_CURRENT_BINARY_DIR )
+  set( CMAKE_CURRENT_BINARY_DIR $ENV{PWD} )
+endif()
+
+message( "CMAKE_SHARED_LIBRARY_SUFFIX: ${CMAKE_SHARED_LIBRARY_SUFFIX}" )
+#message( "... If NOT .dylib, then CMake cache is not respected" )
+
+# otool may resolve symlinks, do the same for the build tree location
+get_filename_component(resolved_build_dir ${CMAKE_CURRENT_BINARY_DIR} REALPATH)
+
+# Get the dylibs excluding system libraries as a semicolon-separated list
+execute_process(
+  COMMAND otool -L "${CMAKE_CURRENT_BINARY_DIR}/src/dakota"
+  # Omit the header and get the library only
+  COMMAND awk "FNR > 1 {print $1}"
+  # Omit system libraries
+  COMMAND egrep -v "(^/System|^/usr/lib|^/usr/X11)"
+  COMMAND tr "\\n" ";"
+  OUTPUT_VARIABLE dakota_darwin_dylibs
+  )
+
+# Probe the CMakeCache.txt for location of the known Boost dynlib dependency
+
+file( STRINGS ${CMAKE_CURRENT_BINARY_DIR}/CMakeCache.txt
+      Boost_LIBRARY_DIRS_PAIR REGEX "^Boost_LIBRARY_DIRS:FILEPATH=(.*)$" )
+string( REGEX REPLACE "^Boost_LIBRARY_DIRS:FILEPATH=(.*)$" "\\1"
+        Cached_Boost_LIBRARY_DIRS "${Boost_LIBRARY_DIRS_PAIR}" )
+
+#message("Boost rpath=${Cached_Boost_LIBRARY_DIRS}")
+
+# Modify dakota_darwin_dylibs for "special case" of Boost
+#   otool DOES NOT return absolute path to Boost libs, so workaround the issue
+
+set(dakota_boost_dylibs "")
+
+# Ignore empty list elements:
+cmake_policy(PUSH)
+cmake_policy(SET CMP0007 OLD)
+
+foreach(pri_lib ${dakota_darwin_dylibs})
+  string(REGEX REPLACE "^libboost_(.*)$"
+    "${Cached_Boost_LIBRARY_DIRS}/libboost_\\1"
+    boost_dylib_fullpath "${pri_lib}")
+
+  if( ${pri_lib} MATCHES libboost_ )
+    # REMOVE boost entries if NOT absolute path
+    list(REMOVE_ITEM dakota_darwin_dylibs ${pri_lib})
+    list(APPEND dakota_boost_dylibs ${boost_dylib_fullpath})
+  endif()
+endforeach()
+
+# Get the secondary dylibs of the dylibs
+foreach(pri_lib ${dakota_darwin_dylibs})
+  execute_process(
+    COMMAND otool -L "${pri_lib}"
+    COMMAND awk "FNR > 1 {print $1}"
+    # Omit system libraries
+    COMMAND egrep -v "(^/System|^/usr/lib|^/usr/X11)"
+    COMMAND tr "\\n" ";"
+    OUTPUT_VARIABLE dakota_secondary_dylibs
+    )
+  list(APPEND dakota_darwin_dylibs ${dakota_secondary_dylibs})
+endforeach()
+
+# Make a second pass over the list to prepend paths to boost libs that were
+# discovered while looking for dakota_secondary_dylibs. Any duplicates
+# will be removed below.
+foreach(pri_lib ${dakota_darwin_dylibs})
+  string(REGEX REPLACE "^libboost_(.*)$"
+    "${Cached_Boost_LIBRARY_DIRS}/libboost_\\1"
+    boost_dylib_fullpath "${pri_lib}")
+
+  if( ${pri_lib} MATCHES libboost_ )
+    # REMOVE boost entries if NOT absolute path
+    list(REMOVE_ITEM dakota_darwin_dylibs ${pri_lib})
+    list(APPEND dakota_boost_dylibs ${boost_dylib_fullpath})
+  endif()
+endforeach()
+
+# otool finished processing dylibs -
+# OK to "re-insert" Boost dylibs into the list (ABSOLUTE PATH!)
+
+#message("Boost dylibs=${dakota_boost_dylibs}")
+list(APPEND dakota_darwin_dylibs ${dakota_boost_dylibs})
+
+list(REMOVE_DUPLICATES dakota_darwin_dylibs)
+cmake_policy(POP)
+
+# Process each DLL and install, excluding anything in the build tree
+foreach(dakota_dll ${dakota_darwin_dylibs})
+  string(REGEX REPLACE "^${CMAKE_CURRENT_BINARY_DIR}(.*)$"
+    "dak_omit/\\1" omit_btree_dll "${dakota_dll}")
+  string(REGEX REPLACE "^${resolved_build_dir}(.*)$"
+    "dak_omit/\\1" omit_resolved_btree_dll "${dakota_dll}")
+
+  if( ${omit_btree_dll} MATCHES dak_omit )
+    #message("-- EXCLUDE: ${omit_btree_dll} - OK, already installed in lib")
+    message("-- EXCLUDE: ${dakota_dll} - OK, already installed in lib")
+  elseif( ${omit_resolved_btree_dll} MATCHES dak_omit )
+    message("-- EXCLUDE: ${dakota_dll} - OK, already installed in lib")
+  else()
+    dakota_install_dll("${dakota_dll}")
+  endif()
+endforeach()
+
Index: /issm/trunk/externalpackages/dakota/configs/6.2/mac/packages/VPISparseGrid/src/sandia_rules.cpp
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/mac/packages/VPISparseGrid/src/sandia_rules.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/mac/packages/VPISparseGrid/src/sandia_rules.cpp	(revision 24686)
@@ -0,0 +1,25739 @@
+# include "sandia_rules.hpp"
+
+# include <cstdlib>
+# include <iomanip>
+# include <iostream>
+# include <cmath>
+# include <ctime>
+
+namespace webbur
+{
+//****************************************************************************80
+
+void binary_vector_next ( int n, int bvec[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    BINARY_VECTOR_NEXT generates the next binary vector.
+//
+//  Discussion:
+//
+//    A binary vector is a vector whose entries are 0 or 1.
+//
+//    The user inputs an initial zero vector to start.  The program returns
+//    the "next" vector.
+//
+//    The vectors are produced in the order:
+//
+//    ( 0, 0, 0, ..., 0 )
+//    ( 1, 0, 0, ..., 0 )
+//    ( 0, 1, 0, ..., 0 )
+//    ( 1, 1, 0, ..., 0 )
+//    ( 0, 0, 1, ..., 0 )
+//    ( 1, 0, 1, ..., 0 )
+//               ...
+//    ( 1, 1, 1, ..., 1)
+//
+//    and the "next" vector after (1,1,...,1) is (0,0,...,0).  That is,
+//    we allow wrap around.
+//
+//  Example:
+//
+//    N = 3
+//
+//    Input      Output
+//    -----      ------
+//    0 0 0  =>  1 0 0
+//    1 0 0  =>  0 1 0
+//    0 1 0  =>  1 1 0
+//    1 1 0  =>  0 0 1
+//    0 0 1  =>  1 0 1
+//    1 0 1  =>  0 1 1
+//    0 1 1  =>  1 1 1
+//    1 1 1  =>  0 0 0
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    04 September 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the dimension of the vectors.
+//
+//    Input/output, int BVEC[N], on output, the successor
+//    to the input vector.
+//
+{
+  int i;
+
+  for ( i = 0; i < n; i++ ) // ripple-carry increment, low-order entry first
+  {
+    if ( bvec[i] == 1 )
+    {
+      bvec[i] = 0; // 1 -> 0, carry continues to the next entry
+    }
+    else
+    {
+      bvec[i] = 1; // first 0 becomes 1; increment complete
+      break;
+    }
+  }
+  return;
+}
+//****************************************************************************80
+
+void ccn_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CCN_COMPUTE computes a nested Clenshaw Curtis quadrature rule.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::ccn_compute_points ( n, x );  // abscissas
+  webbur::ccn_compute_weights ( n, w ); // matching weights
+
+  return;
+}
+//****************************************************************************80
+
+void ccn_compute_np ( int n, int np, double p[], double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CCN_COMPUTE_NP computes a nested Clenshaw Curtis quadrature rule.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::ccn_compute ( n, x, w ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+void ccn_compute_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CCN_COMPUTE_POINTS: compute nested Clenshaw Curtis points.
+//
+//  Discussion:
+//
+//    We want to compute the following sequence:
+//
+//    1/2,
+//    0, 1
+//    1/4, 3/4
+//    1/8, 3/8, 5/8, 7/8,
+//    1/16, 3/16, 5/16, 7/16, 9/16, 11/16, 13/16, 15/16, and so on.
+//
+//    But we would prefer that the numbers in each row be regrouped in pairs
+//    that are symmetric about 1/2, with the number above 1/2 coming first.
+//    Thus, the last row might become:
+//    (9/16, 7/16), (11/16, 5/16), ..., (15/16, 1/16).
+//
+//    Once we have our sequence, we apply the Chebyshev transformation
+//    which maps [0,1] to [-1,+1].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    06 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of elements to compute.
+//
+//    Output, double X[N], the elements of the sequence.
+//
+{
+  int d;
+  int i;
+  int k;
+  int m;
+  double pi = 3.141592653589793;
+  int td;
+  int tu;
+//
+//  Handle first three entries specially.
+//
+  if ( 1 <= n )
+  {
+    x[0] = 0.5;
+  }
+
+  if ( 2 <= n )
+  {
+    x[1] = 1.0;
+  }
+
+  if ( 3 <= n )
+  {
+    x[2] = 0.0;
+  }
+
+  m = 3; // number of entries set so far
+  d = 2; // row counter; doubles each pass
+
+  while ( m < n )
+  {
+    tu = d + 1; // numerator walking up from 1/2
+    td = d - 1; // numerator walking down from 1/2
+
+    k = webbur::i4_min ( d, n - m ); // entries to fill in this (possibly partial) row
+
+    for ( i = 1; i <= k; i++ )
+    {
+      if ( ( i % 2 ) == 1 )
+      {
+        x[m+i-1] = tu / 2.0 / ( double ) ( k );
+        tu = tu + 2;
+      }
+      else
+      {
+        x[m+i-1] = td / 2.0 / ( double ) ( k );
+        td = td - 2;
+      }
+    }
+    m = m + k;
+    d = d * 2;
+  }
+//
+//  Apply the Chebyshev transformation.
+//
+  for ( i = 0; i < n; i++ )
+  {
+    x[i] = std::cos ( x[i] * pi );
+  }
+  x[0] = 0.0; // pin the first three points to exact values
+
+  if ( 2 <= n )
+  {
+    x[1] = -1.0;
+  }
+
+  if ( 3 <= n )
+  {
+    x[2] = +1.0;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void ccn_compute_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CCN_COMPUTE_POINTS_NP: nested Clenshaw Curtis quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::ccn_compute_points ( n, x ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+void ccn_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CCN_COMPUTE_WEIGHTS: weights for nested Clenshaw Curtis rule.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order of the rule.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double *x;
+  double x_max;
+  double x_min;
+
+  x = new double[n]; // temporary copy of the abscissas
+
+  webbur::ccn_compute_points ( n, x );
+//
+//  Get the weights.
+//
+  x_min = -1.0;
+  x_max = +1.0;
+
+  webbur::nc_compute ( n, x_min, x_max, x, w ); // derive weights from the points on [-1,+1]
+
+  delete [] x;
+
+  return;
+}
+//****************************************************************************80
+
+void ccn_compute_weights_np ( int n, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CCN_COMPUTE_WEIGHTS_NP: nested Clenshaw Curtis quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::ccn_compute_weights ( n, w ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev1_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV1_COMPUTE computes a Chebyshev type 1 quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) F(X) / sqrt ( 1 - x^2 ) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  int i;
+  double pi = 3.141592653589793;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "CHEBYSHEV1_COMPUTE - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    w[i] = pi / ( double ) ( n ); // all weights are equal for this rule
+  }
+  for ( i = 0; i < n; i++ )
+  {
+    x[i] = std::cos ( pi * ( double ) ( 2 * n - 1 - 2 * i )
+                         / ( double ) ( 2 * n ) ); // abscissas in increasing order
+  }
+  if ( ( n % 2 ) == 1 )
+  {
+    x[(n-1)/2] = 0.0; // make the midpoint exactly zero for odd N
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev1_compute_np ( int n, int np, double p[], double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV1_COMPUTE_NP computes a Chebyshev type 1 quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) F(X) / sqrt ( 1 - x^2 ) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::chebyshev1_compute ( n, x, w ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev1_compute_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV1_COMPUTE_POINTS computes Chebyshev type 1 quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  int i;
+  double pi = 3.141592653589793;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "CHEBYSHEV1_COMPUTE_POINTS - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    x[i] =  std::cos ( pi * ( double ) ( 2 * n - 1 - 2 * i )
+                          / ( double ) ( 2 * n ) ); // abscissas in increasing order
+  }
+  if ( ( n % 2 ) == 1 )
+  {
+    x[(n-1)/2] = 0.0; // make the midpoint exactly zero for odd N
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev1_compute_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV1_COMPUTE_POINTS_NP computes Chebyshev type 1 quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::chebyshev1_compute_points ( n, x ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev1_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV1_COMPUTE_WEIGHTS computes Chebyshev type 1 quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double W[N], the weights.
+//
+{
+  int i;
+  double pi = 3.141592653589793;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "CHEBYSHEV1_COMPUTE_WEIGHTS - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    w[i] = pi / ( double ) ( n ); // all weights are equal for this rule
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev1_compute_weights_np ( int n, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV1_COMPUTE_WEIGHTS_NP: Chebyshev type 1 quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::chebyshev1_compute_weights ( n, w ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+double chebyshev1_integral ( int expon )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV1_INTEGRAL evaluates a monomial Chebyshev type 1 integral.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( -1 <= x <= +1 ) x^n / sqrt ( 1 - x^2 ) dx
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    26 February 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int EXPON, the exponent.
+//
+//    Output, double CHEBYSHEV1_INTEGRAL, the value of the exact integral.
+//
+{
+  double bot;
+  double exact;
+  int i;
+  double pi = 3.141592653589793;
+  double top;
+//
+//  Get the exact value of the integral.
+//
+  if ( ( expon % 2 ) == 0 )
+  {
+    top = 1;
+    bot = 1;
+    for ( i = 2; i <= expon; i = i + 2 )
+    {
+      top = top * ( i - 1 ); // product of odd factors: (expon-1)!!
+      bot = bot *   i;       // product of even factors: expon!!
+    }
+
+    exact = pi * ( double ) ( top ) / ( double ) ( bot );
+  }
+  else
+  {
+    exact = 0.0; // odd powers integrate to zero by symmetry
+  }
+
+  return exact;
+}
+//****************************************************************************80
+
+void chebyshev2_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV2_COMPUTE computes a Chebyshev type 2 quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( -1 <= x <= 1 ) f(x)  sqrt ( 1 - x^2 )  dx
+//
+//    The quadrature rule:
+//
+//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double angle;
+  int i;
+  double pi = 3.141592653589793;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "CHEBYSHEV2_COMPUTE - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    angle = pi * ( double ) ( n - i ) / ( double ) ( n + 1 ); // angle decreases, so x increases
+    w[i] = pi / ( double ) ( n + 1 ) * std::pow ( std::sin ( angle ), 2 );
+    x[i] = std::cos ( angle );
+  }
+
+  if ( ( n % 2 ) == 1 )
+  {
+    x[(n-1)/2] = 0.0; // make the midpoint exactly zero for odd N
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev2_compute_np ( int n, int np, double p[], double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV2_COMPUTE_NP computes a Chebyshev type 2 quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) F(X)  sqrt ( 1 - x^2 )  dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::chebyshev2_compute ( n, x, w ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev2_compute_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV2_COMPUTE_POINTS computes Chebyshev type 2 quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  double angle;
+  int i;
+  double pi = 3.141592653589793;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "CHEBYSHEV2_COMPUTE_POINTS - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    angle = pi * ( double ) ( n - i ) / ( double ) ( n + 1 ); // angle decreases, so x increases
+    x[i] =  std::cos ( angle );
+  }
+
+  if ( ( n % 2 ) == 1 )
+  {
+    x[(n-1)/2] = 0.0; // make the midpoint exactly zero for odd N
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev2_compute_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV2_COMPUTE_POINTS_NP computes Chebyshev type 2 quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    03 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::chebyshev2_compute_points ( n, x ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev2_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV2_COMPUTE_WEIGHTS computes Chebyshev type 2 quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double angle;
+  int i;
+  double pi = 3.141592653589793;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "CHEBYSHEV2_COMPUTE_WEIGHTS - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    angle = pi * ( double ) ( n - i ) / ( double ) ( n + 1 );
+    w[i] = pi / ( double ) ( n + 1 ) * std::pow ( std::sin ( angle ), 2 ); // same weight formula as chebyshev2_compute
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void chebyshev2_compute_weights_np ( int n, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV2_COMPUTE_WEIGHTS_NP: Chebyshev type 2 quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::chebyshev2_compute_weights ( n, w ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+double chebyshev2_integral ( int expon )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CHEBYSHEV2_INTEGRAL evaluates a monomial Chebyshev type 2 integral.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( -1 <= x <= +1 ) x^n * sqrt ( 1 - x^2 ) dx
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    26 February 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int EXPON, the exponent.
+//
+//    Output, double CHEBYSHEV2_INTEGRAL, the value of the exact integral.
+//
+{
+  double bot;
+  double exact;
+  int i;
+  double pi = 3.141592653589793;
+  double top;
+//
+//  Get the exact value of the integral.
+//
+  if ( ( expon % 2 ) == 0 )
+  {
+    top = 1;
+    bot = 1;
+    for ( i = 2; i <= expon; i = i + 2 )
+    {
+      top = top * ( i - 1 ); // product of odd factors: (expon-1)!!
+      bot = bot *   i;       // product of even factors: expon!!
+    }
+
+	bot = bot * ( double ) ( expon + 2 ); // extra factor distinguishes type 2 from type 1
+
+    exact = pi * ( double ) ( top ) / ( double ) ( bot );
+  }
+  else
+  {
+    exact = 0.0; // odd powers integrate to zero by symmetry
+  }
+  return exact;
+}
+//****************************************************************************80
+
+void clenshaw_curtis_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CLENSHAW_CURTIS_COMPUTE computes a Clenshaw Curtis quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 March 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double b;
+  int i;
+  int j;
+  double pi = 3.141592653589793;
+  double theta;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "CLENSHAW_CURTIS_COMPUTE - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+  else if ( n == 1 )
+  {
+    x[0] = 0.0;
+    w[0] = 2.0; // single midpoint rule on [-1,+1]
+  }
+  else
+  {
+    for ( i = 0; i < n; i++ )
+    {
+      x[i] =  std::cos ( ( double ) ( n - 1 - i ) * pi
+                       / ( double ) ( n - 1     ) );
+    }
+    x[0] = -1.0; // pin endpoints (and midpoint, for odd N) to exact values
+    if ( ( n % 2 ) == 1 )
+    {
+      x[(n-1)/2] = 0.0;
+    }
+    x[n-1] = +1.0;
+
+    for ( i = 0; i < n; i++ )
+    {
+      theta = ( double ) ( i ) * pi / ( double ) ( n - 1 );
+
+      w[i] = 1.0;
+
+      for ( j = 1; j <= ( n - 1 ) / 2; j++ )
+      {
+        if ( 2 * j == ( n - 1 ) )
+        {
+          b = 1.0; // final term halved when 2j == n-1
+        }
+        else
+        {
+          b = 2.0;
+        }
+
+        w[i] = w[i] - b *  std::cos ( 2.0 * ( double ) ( j ) * theta )
+          / ( double ) ( 4 * j * j - 1 );
+      }
+    }
+
+    w[0] = w[0] / ( double ) ( n - 1 ); // endpoint weights get half the interior scaling
+    for ( i = 1; i < n - 1; i++ )
+    {
+      w[i] = 2.0 * w[i] / ( double ) ( n - 1 );
+    }
+    w[n-1] = w[n-1] / ( double ) ( n - 1 );
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void clenshaw_curtis_compute_np ( int n, int np, double p[], double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CLENSHAW_CURTIS_COMPUTE_NP computes a Clenshaw Curtis quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::clenshaw_curtis_compute ( n, x, w ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
+void clenshaw_curtis_compute_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CLENSHAW_CURTIS_COMPUTE_POINTS computes Clenshaw Curtis quadrature points.
+//
+//  Discussion:
+//
+//    Our convention is that the abscissas are numbered from left to right.
+//
+//    This rule is defined on [-1,1].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  int index;
+  double pi = 3.141592653589793;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "CLENSHAW_CURTIS_COMPUTE_POINTS - Fatal error!\n";
+    std::cerr << "  N < 1.\n";
+    std::exit ( 1 );
+  }
+  else if ( n == 1 )
+  {
+    x[0] = 0.0;
+  }
+  else
+  {
+    for ( index = 1; index <= n; index++ )
+    {
+      x[index-1] =  std::cos ( ( double ) ( n - index ) * pi
+                             / ( double ) ( n - 1     ) );
+    }
+    x[0] = -1.0; // pin endpoints (and midpoint, for odd N) to exact values
+    if ( ( n % 2 ) == 1 )
+    {
+      x[(n-1)/2] = 0.0;
+    }
+    x[n-1] = +1.0;
+  }
+  return;
+}
+//****************************************************************************80
+
+void clenshaw_curtis_compute_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CLENSHAW_CURTIS_COMPUTE_POINTS_NP: Clenshaw Curtis quadrature points.
+//
+//  Discussion:
+//
+//    Our convention is that the abscissas are numbered from left to right.
+//
+//    This rule is defined on [-1,1].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::clenshaw_curtis_compute_points ( n, x ); // NP/P exist only for interface uniformity
+
+  return;
+}
+//****************************************************************************80
+
void clenshaw_curtis_compute_weights ( int n, double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    CLENSHAW_CURTIS_COMPUTE_WEIGHTS computes Clenshaw Curtis quadrature weights.
//
//  Discussion:
//
//    The caller must preallocate space for the output array W.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    13 June 2009
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Charles Clenshaw, Alan Curtis,
//    A Method for Numerical Integration on an Automatic Computer,
//    Numerische Mathematik,
//    Volume 2, Number 1, December 1960, pages 197-205.
//
//  Parameters:
//
//    Input, int N, the order.
//
//    Output, double W[N], the weights.
//
{
  double b;
  int i;
  int j;
  double pi = 3.141592653589793;
  double theta;

  if ( n < 1 )
  {
    std::cerr << "\n";
    std::cerr << "CLENSHAW_CURTIS_COMPUTE_WEIGHTS - Fatal error!\n";
    std::cerr << "  N < 1.\n";
    std::exit ( 1 );
  }

  if ( n == 1 )
  {
    w[0] = 2.0;
    return;
  }
//
//  W(I) = 1 - sum over J of B * cos ( 2 J theta ) / ( 4 J^2 - 1 ),
//  where theta = I * pi / ( N - 1 ) and B halves the final term when
//  2 J reaches N - 1.
//
  for ( i = 0; i < n; i++ )
  {
    theta = ( double ) ( i ) * pi / ( double ) ( n - 1 );

    w[i] = 1.0;

    for ( j = 1; j <= ( n - 1 ) / 2; j++ )
    {
      b = ( 2 * j == ( n - 1 ) ) ? 1.0 : 2.0;

      w[i] = w[i] - b *  std::cos ( 2.0 * ( double ) ( j ) * theta )
           / ( double ) ( 4 * j * j - 1 );
    }
  }
//
//  Scale: the two endpoints carry half the weight of interior points.
//
  w[0] = w[0] / ( double ) ( n - 1 );
  for ( i = 1; i < n - 1; i++ )
  {
    w[i] = 2.0 * w[i] / ( double ) ( n - 1 );
  }
  w[n-1] = w[n-1] / ( double ) ( n - 1 );

  return;
}
+//****************************************************************************80
+
+void clenshaw_curtis_compute_weights_np ( int n, int np, double p[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    CLENSHAW_CURTIS_COMPUTE_WEIGHTS_NP: Clenshaw Curtis quadrature weights.
+//
+//  Discussion:
+//
+//    The user must preallocate space for the output array W.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Charles Clenshaw, Alan Curtis,
+//    A Method for Numerical Integration on an Automatic Computer,
+//    Numerische Mathematik,
+//    Volume 2, Number 1, December 1960, pages 197-205.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::clenshaw_curtis_compute_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
void comp_next ( int n, int k, int a[], bool *more, int *h, int *t )

//****************************************************************************80
//
//  Purpose:
//
//    COMP_NEXT computes the compositions of the integer N into K parts.
//
//  Discussion:
//
//    A composition of the integer N into K parts is an ordered sequence
//    of K nonnegative integers which sum to N.  The compositions (1,2,1)
//    and (1,1,2) are considered to be distinct.
//
//    The routine computes one composition on each call until there are no more.
//    For instance, one composition of 6 into 3 parts is
//    3+2+1, another would be 6+0+0.
//
//    On the first call to this routine, set MORE = FALSE.  The routine
//    will compute the first element in the sequence of compositions, and
//    return it, as well as setting MORE = TRUE.  If more compositions
//    are desired, call again, and again.  Each time, the routine will
//    return with a new composition.
//
//    However, when the LAST composition in the sequence is computed
//    and returned, the routine will reset MORE to FALSE, signaling that
//    the end of the sequence has been reached.
//
//    This routine originally used a SAVE statement to maintain the
//    variables H and T.  I have decided that it is safer
//    to pass these variables as arguments, even though the user should
//    never alter them.  This allows this routine to safely shuffle
//    between several ongoing calculations.
//
//
//    There are 28 compositions of 6 into three parts.  This routine will
//    produce those compositions in the following order:
//
//     I         A
//     -     ---------
//     1     6   0   0
//     2     5   1   0
//     3     4   2   0
//     4     3   3   0
//     5     2   4   0
//     6     1   5   0
//     7     0   6   0
//     8     5   0   1
//     9     4   1   1
//    10     3   2   1
//    11     2   3   1
//    12     1   4   1
//    13     0   5   1
//    14     4   0   2
//    15     3   1   2
//    16     2   2   2
//    17     1   3   2
//    18     0   4   2
//    19     3   0   3
//    20     2   1   3
//    21     1   2   3
//    22     0   3   3
//    23     2   0   4
//    24     1   1   4
//    25     0   2   4
//    26     1   0   5
//    27     0   1   5
//    28     0   0   6
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    02 July 2008
//
//  Author:
//
//    Original FORTRAN77 version by Albert Nijenhuis, Herbert Wilf.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Albert Nijenhuis, Herbert Wilf,
//    Combinatorial Algorithms for Computers and Calculators,
//    Second Edition,
//    Academic Press, 1978,
//    ISBN: 0-12-519260-6,
//    LC: QA164.N54.
//
//  Parameters:
//
//    Input, int N, the integer whose compositions are desired.
//
//    Input, int K, the number of parts in the composition.
//
//    Input/output, int A[K], the parts of the composition.
//
//    Input/output, bool *MORE.
//    Set MORE = FALSE on first call.  It will be reset to TRUE on return
//    with a new composition.  Each new call returns another composition until
//    MORE is set to FALSE when the last composition has been computed
//    and returned.
//
//    Input/output, int *H, *T, two internal parameters needed for the
//    computation.  The user should allocate space for these in the calling
//    program, include them in the calling sequence, but never alter them!
//
{
  int i;
//
//  First call: start the sequence with the composition ( N, 0, ..., 0 ).
//  T tracks the value most recently removed; H indexes the part being
//  decremented.
//
  if ( !( *more ) )
  {
    *t = n;
    *h = 0;
    a[0] = n;
    for ( i = 1; i < k; i++ )
    {
       a[i] = 0;
    }
  }
  else
  {
//
//  If the last removed value exceeded 1, restart H at the first part.
//
    if ( 1 < *t )
    {
      *h = 0;
    }
//
//  Empty part H-1 back into the first part (minus one unit), and move
//  that one unit into part H.  The exact order of these updates is
//  what produces the documented enumeration sequence.
//
    *h = *h + 1;
    *t = a[*h-1];
    a[*h-1] = 0;
    a[0] = *t - 1;
    a[*h] = a[*h] + 1;
  }
//
//  The sequence is exhausted once the final part has absorbed all of N.
//
  *more = ( a[k-1] != n );

  return;
}
+//****************************************************************************80
+
double cpu_time ( )

//****************************************************************************80
//
//  Purpose:
//
//    CPU_TIME reports the elapsed CPU time.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    26 July 2010
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Output, double CPU_TIME, the current total elapsed CPU time in seconds.
//
{
  return ( double ) std::clock ( ) / ( double ) CLOCKS_PER_SEC;
}
+//****************************************************************************80
+
void dif_deriv ( int nd, double xd[], double yd[], int *ndp, double xdp[],
  double ydp[] )

//****************************************************************************80
//
//  Purpose:
//
//    DIF_DERIV computes the derivative of a polynomial in divided difference form.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    23 June 2011
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Carl deBoor,
//    A Practical Guide to Splines,
//    Springer, 2001,
//    ISBN: 0387953663,
//    LC: QA1.A647.v27.
//
//  Parameters:
//
//    Input, int ND, the size of the input table.
//
//    Input, double XD[ND], the abscissas for the divided
//    difference table.
//
//    Input, double YD[ND], the divided difference table.
//
//    Output, int *NDP, the size of the output table, which is ND-1.
//
//    Output, double XDP[NDP], the abscissas for the divided
//    difference table for the derivative.  All entries are set to zero.
//
//    Output, double YDP[NDP], the divided difference
//    table for the derivative.
//
{
  int i;
  double *xd_temp;
  double *yd_temp;
//
//  Using a temporary copy of the difference table, shift the
//  abscissas to zero.
//
  xd_temp = new double[nd];
  yd_temp = new double[nd];

  for ( i = 0; i < nd; i++ )
  {
    xd_temp[i] = xd[i];
  }
  for ( i = 0; i < nd; i++ )
  {
    yd_temp[i] = yd[i];
  }

  webbur::dif_shift_zero ( nd, xd_temp, yd_temp );
//
//  Construct the derivative.
//
  *ndp = nd - 1;

  for ( i = 0; i < *ndp; i++ )
  {
    xdp[i] = 0.0;
  }
//
//  With all abscissas at zero, YD_TEMP holds standard polynomial
//  coefficients, so differentiation is just YDP[I] = (I+1) * YD_TEMP[I+1].
//
  for ( i = 0; i < *ndp; i++ )
  {
    ydp[i] = ( double ) ( i + 1 ) * yd_temp[i+1];
  }

  delete [] xd_temp;
  delete [] yd_temp;

  return;
}
+//****************************************************************************80
+
+void dif_shift_x ( int nd, double xd[], double yd[], double xv )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    DIF_SHIFT_X replaces one abscissa of a divided difference table with a new one.
+//
+//  Discussion:
+//
+//    This routine shifts the representation of a divided difference polynomial by
+//    dropping the last X value in XD, and adding a new X value to the
+//    beginning of the Xd array, suitably modifying the coefficients stored
+//    in YD.
+//
+//    The representation of the polynomial is changed, but the polynomial itself
+//    should be identical.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 June 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Carl deBoor,
+//    A Practical Guide to Splines,
+//    Springer, 2001,
+//    ISBN: 0387953663,
+//    LC: QA1.A647.v27.
+//
+//  Parameters:
+//
+//    Input, int ND, the number of divided difference coefficients, and
+//    the number of entries in XD.
+//
+//    Input/output, double XD[ND], the X values used in the representation of
+//    the divided difference polynomial.  After a call to this routine, the
+//    last entry of XD has been dropped, the other
+//    entries have shifted up one index, and XV has been inserted at the
+//    beginning of the array.
+//
+//    Input/output, double YD[ND], the divided difference coefficients
+//    corresponding to the XD array.  On output, this array has been
+//    adjusted.
+//
+//    Input, double XV, a new X value which is to be used in the representation
+//    of the polynomial.  On output, XD[0] equals XV and the representation
+//    of the polynomial has been suitably changed.
+//    Note that XV does not have to be distinct from any of the original XD
+//    values.
+//
+{
+  int i;
+//
+//  Recompute the divided difference coefficients.
+//
+  for ( i = nd - 2; 0 <= i; i-- )
+  {
+    yd[i] = yd[i] + ( xv - xd[i] ) * yd[i+1];
+  }
+//
+//  Shift the X values up one position and insert XV.
+//
+  for ( i = nd - 1; 0 < i; i-- )
+  {
+    xd[i] = xd[i-1];
+  }
+
+  xd[0] = xv;
+
+  return;
+}
+//****************************************************************************80
+
+void dif_shift_zero ( int nd, double xd[], double yd[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    DIF_SHIFT_ZERO shifts a divided difference table so that all abscissas are zero.
+//
+//  Discussion:
+//
+//    When the abscissas are changed, the coefficients naturally
+//    must also be changed.
+//
+//    The resulting pair (XD, YD) still represents the
+//    same polynomial, but the entries in YD are now the
+//    standard polynomial coefficients.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 June 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Carl deBoor,
+//    A Practical Guide to Splines,
+//    Springer, 2001,
+//    ISBN: 0387953663,
+//    LC: QA1.A647.v27.
+//
+//  Parameters:
+//
+//    Input, int ND, the length of the XD and YD arrays.
+//
+//    Input/output, double XD[ND], the X values that correspond to the
+//    divided difference table.  On output, XD contains only zeroes.
+//
+//    Input/output, double YD[ND], the divided difference table
+//    for the polynomial.  On output, YD is also
+//    the coefficient array for the standard representation
+//    of the polynomial.
+//
+{
+  int i;
+  double xv;
+
+  xv = 0.0;
+
+  for ( i = 1; i <= nd; i++ )
+  {
+    webbur::dif_shift_x ( nd, xd, yd, xv );
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void dif_to_r8poly ( int nd, double xd[], double yd[], double c[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    DIF_TO_R8POLY converts a divided difference table to a standard polynomial.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    21 February 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Carl deBoor,
+//    A Practical Guide to Splines,
+//    Springer, 2001,
+//    ISBN: 0387953663,
+//    LC: QA1.A647.v27.
+//
+//  Parameters:
+//
+//    Input, int ND, the number of coefficients, and abscissas.
+//
+//    Input, double XD[ND], the X values used in the divided difference
+//    representation of the polynomial.
+//
+//    Input, double YD[ND], the divided difference table.
+//
+//    Output, double C[ND], the standard form polyomial coefficients.
+//    C[0] is the constant term, and C[ND-1] is the coefficient
+//    of X^(ND-1).
+//
+{
+  int i;
+  int j;
+
+  for ( i = 0; i < nd; i++ )
+  {
+    c[i] = yd[i];
+  }
+//
+//  Recompute the divided difference coefficients.
+//
+  for ( j = 1; j <= nd - 1; j++ )
+  {
+    for ( i = 1; i <= nd - j; i++ )
+    {
+      c[nd-i-1] = c[nd-i-1] - xd[nd-i-j] * c[nd-i];
+    }
+  }
+
+  return;
+}
+//****************************************************************************80
+
void fejer2_compute ( int n, double x[], double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    FEJER2_COMPUTE computes a Fejer type 2 rule.
//
//  Discussion:
//
//    The abscissas are numbered from left to right, and the rule is
//    defined on [-1,1].
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    13 June 2009
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order.
//    1 <= N.
//
//    Output, double X[N], the abscissas.
//
//    Output, double W[N], the weights.
//
{
  int i;
  int j;
  double p;
  double pi = 3.141592653589793;
  double theta;

  if ( n < 1 )
  {
    std::cerr << "\n";
    std::cerr << "FEJER2_COMPUTE - Fatal error!\n";
    std::cerr << "  Illegal value of N = " << n << "\n";
    std::exit ( 1 );
  }

  if ( n == 1 )
  {
    x[0] = 0.0;
    w[0] = 2.0;
    return;
  }
//
//  X(I) = cos ( ( N - I ) * pi / ( N + 1 ) ); force the midpoint for odd N.
//
  for ( i = 0; i < n; i++ )
  {
    x[i] =  std::cos ( ( double ) ( n - i ) * pi
                     / ( double ) ( n + 1 ) );
  }
  if ( ( n % 2 ) == 1 )
  {
    x[(n-1)/2] = 0.0;
  }

  if ( n == 2 )
  {
    w[0] = 1.0;
    w[1] = 1.0;
    return;
  }
//
//  P depends only on N, so it is hoisted out of the weight loop.
//
  p = 2.0 * ( double ) ( ( ( n + 1 ) / 2 ) ) - 1.0;

  for ( i = 0; i < n; i++ )
  {
    theta = ( double ) ( n - i ) * pi
          / ( double ) ( n + 1 );

    w[i] = 1.0;

    for ( j = 1; j <= ( ( n - 1 ) / 2 ); j++ )
    {
      w[i] = w[i] - 2.0 *  std::cos ( 2.0 * ( double ) ( j ) * theta )
        / ( double ) ( 4 * j * j - 1 );
    }
    w[i] = w[i] -  std::cos ( ( p + 1.0 ) * theta ) / p;

    w[i] = 2.0 * w[i] / ( double ) ( n + 1 );
  }
  return;
}
+//****************************************************************************80
+
+void fejer2_compute_np ( int n, int np, double p[], double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    FEJER2_COMPUTE_NP computes a Fejer type 2 rule.
+//
+//  Discussion:
+//
+//    Our convention is that the abscissas are numbered from left to right.
+//
+//    The rule is defined on [-1,1].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::fejer2_compute ( n, x, w );
+
+  return;
+}
+//****************************************************************************80
+
void fejer2_compute_points ( int n, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    FEJER2_COMPUTE_POINTS computes Fejer type 2 quadrature points.
//
//  Discussion:
//
//    The abscissas are numbered from left to right, and the rule is
//    defined on [-1,1].
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    13 June 2009
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order.
//    1 <= N.
//
//    Output, double X[N], the abscissas.
//
{
  int i;
  double pi = 3.141592653589793;

  if ( n < 1 )
  {
    std::cerr << "\n";
    std::cerr << "FEJER2_COMPUTE_POINTS - Fatal error!\n";
    std::cerr << "  N < 1.\n";
    std::exit ( 1 );
  }

  if ( n == 1 )
  {
    x[0] = 0.0;
    return;
  }
//
//  X(I) = cos ( ( N - I ) * pi / ( N + 1 ) ), I = 0, ..., N-1; the
//  midpoint is forced to zero for odd N.
//
  for ( i = 0; i < n; i++ )
  {
    x[i] = std::cos ( ( double ) ( n - i ) * pi / ( double ) ( n + 1 ) );
  }
  if ( ( n % 2 ) == 1 )
  {
    x[(n-1)/2] = 0.0;
  }
  return;
}
+//****************************************************************************80
+
+void fejer2_compute_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    FEJER2_COMPUTE_POINTS_NP computes Fejer type 2 quadrature points.
+//
+//  Discussion:
+//
+//    Our convention is that the abscissas are numbered from left to right.
+//
+//    The rule is defined on [-1,1].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::fejer2_compute_points ( n, x );
+
+  return;
+}
+//****************************************************************************80
+
void fejer2_compute_weights ( int n, double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    FEJER2_COMPUTE_WEIGHTS computes Fejer type 2 quadrature weights.
//
//  Discussion:
//
//    The caller must preallocate space for the output array W.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    13 June 2009
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Philip Davis, Philip Rabinowitz,
//    Methods of Numerical Integration,
//    Second Edition,
//    Dover, 2007,
//    ISBN: 0486453391,
//    LC: QA299.3.D28.
//
//    Walter Gautschi,
//    Numerical Quadrature in the Presence of a Singularity,
//    SIAM Journal on Numerical Analysis,
//    Volume 4, Number 3, 1967, pages 357-362.
//
//    Joerg Waldvogel,
//    Fast Construction of the Fejer and Clenshaw-Curtis Quadrature Rules,
//    BIT Numerical Mathematics,
//    Volume 43, Number 1, 2003, pages 1-18.
//
//  Parameters:
//
//    Input, int N, the order.
//
//    Output, double W[N], the weights.
//
{
  int i;
  int j;
  double p;
  double pi = 3.141592653589793;
  double theta;

  if ( n < 1 )
  {
    std::cerr << "\n";
    std::cerr << "FEJER2_COMPUTE_WEIGHTS - Fatal error!\n";
    std::cerr << "  N < 1.\n";
    std::exit ( 1 );
  }

  if ( n == 1 )
  {
    w[0] = 2.0;
    return;
  }

  if ( n == 2 )
  {
    w[0] = 1.0;
    w[1] = 1.0;
    return;
  }
//
//  P depends only on N, so it is hoisted out of the loop.
//
  p = 2.0 * ( double ) ( ( ( n + 1 ) / 2 ) ) - 1.0;

  for ( i = 0; i < n; i++ )
  {
    theta = ( double ) ( n - i ) * pi
          / ( double ) ( n + 1 );

    w[i] = 1.0;

    for ( j = 1; j <= ( ( n - 1 ) / 2 ); j++ )
    {
      w[i] = w[i] - 2.0 *  std::cos ( 2.0 * ( double ) ( j ) * theta )
        / ( double ) ( 4 * j * j - 1 );
    }
    w[i] = w[i] -  std::cos ( ( p + 1.0 ) * theta ) / p;

    w[i] = 2.0 * w[i] / ( double ) ( n + 1 );
  }
  return;
}
+//****************************************************************************80
+
+void fejer2_compute_weights_np ( int n, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    FEJER2_COMPUTE_WEIGHTS_NP computes Fejer type 2 quadrature weights.
+//
+//  Discussion:
+//
+//    The user must preallocate space for the output array W.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//    Walter Gautschi,
+//    Numerical Quadrature in the Presence of a Singularity,
+//    SIAM Journal on Numerical Analysis,
+//    Volume 4, Number 3, 1967, pages 357-362.
+//
+//    Joerg Waldvogel,
+//    Fast Construction of the Fejer and Clenshaw-Curtis Quadrature Rules,
+//    BIT Numerical Mathematics,
+//    Volume 43, Number 1, 2003, pages 1-18.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::fejer2_compute_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
void gegenbauer_compute ( int order, double alpha, double x[], double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    GEGENBAUER_COMPUTE computes a Gegenbauer quadrature rule.
//
//  Discussion:
//
//    The integral:
//
//      Integral ( -1 <= X <= 1 ) (1-X^2)^ALPHA * F(X) dX
//
//    The quadrature rule:
//
//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
//
//    Thanks to Janiki Raman for pointing out a problem in an earlier
//    version of the code that occurred when ALPHA was -0.5.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    13 June 2009
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//  Parameters:
//
//    Input, int ORDER, the order.
//    1 <= ORDER.
//
//    Input, double ALPHA, the exponent of (1-X^2).  -1.0 < ALPHA is required.
//
//    Output, double X[ORDER], the abscissas.
//
//    Output, double W[ORDER], the weights.
//
{
  double an;
  double *c;      // recursion coefficients of the Gegenbauer three-term recurrence
  double cc;      // normalization constant for the weights
  double delta;
  double dp2;     // derivative value returned by GEGENBAUER_ROOT
  int i;
  double p1;      // polynomial value returned by GEGENBAUER_ROOT
  double prod;
  double r1;
  double r2;
  double r3;
  double temp;
  double x0;      // current root estimate, carried from one root to the next
//
//  Check ORDER.
//
  if ( order < 1 )
  {
    std::cerr << "\n";
    std::cerr << "GEGENBAUER_COMPUTE - Fatal error!\n";
    std::cerr << "  1 <= ORDER is required.\n";
    std::exit ( 1 );
  }
  c = new double[order];
//
//  Check ALPHA.
//
  if ( alpha <= -1.0 )
  {
    std::cerr << "\n";
    std::cerr << "GEGENBAUER_COMPUTE - Fatal error!\n";
    std::cerr << "  -1.0 < ALPHA is required.\n";
    std::exit ( 1 );
  }
//
//  Set the recursion coefficients.
//
  c[0] = 0.0;
  if ( 2 <= order )
  {
    c[1] = 1.0 / ( 2.0 * alpha + 3.0 );
  }

  for ( i = 3; i <= order; i++ )
  {
    c[i-1] = ( double ) ( i - 1 )
          * ( alpha + alpha + ( double ) ( i - 1 ) ) /
          ( ( alpha + alpha + ( double ) ( 2 * i - 1 ) )
          * ( alpha + alpha + ( double ) ( 2 * i - 3 ) ) );
  }
//
//  DELTA = Gamma(ALPHA+1)^2 / Gamma(2*ALPHA+2); combined with the product
//  of the recursion coefficients it yields CC, the weight normalization.
//
  delta = webbur::r8_gamma ( alpha         + 1.0 )
        * webbur::r8_gamma (         alpha + 1.0 )
        / webbur::r8_gamma ( alpha + alpha + 2.0 );

  prod = 1.0;
  for ( i = 2; i <= order; i++ )
  {
    prod = prod * c[i-1];
  }
  cc = delta * std::pow ( 2.0, alpha + alpha + 1.0 ) * prod;
//
//  Locate the roots one at a time.  Each branch below produces an initial
//  estimate X0 (empirically tuned formulas from Stroud & Secrest), which
//  GEGENBAUER_ROOT then refines.
//
  for ( i = 1; i <= order; i++ )
  {
//
//  Initial estimate for the first root.
//
    if ( i == 1 )
    {
      an = alpha / ( double ) ( order );

      r1 = ( 1.0 + alpha )
        * ( 2.78 / ( 4.0 + ( double ) ( order * order ) )
        + 0.768 * an / ( double ) ( order ) );

      r2 = 1.0 + 2.44 * an + 1.282 * an * an;

      x0 = ( r2 - r1 ) / r2;
    }
//
//  Initial estimate for the second root, stepped from the previous X0.
//
    else if ( i == 2 )
    {
      r1 = ( 4.1 + alpha ) /
        ( ( 1.0 + alpha ) * ( 1.0 + 0.156 * alpha ) );

      r2 = 1.0 + 0.06 * ( ( double ) ( order ) - 8.0 ) *
        ( 1.0 + 0.12 * alpha ) / ( double ) ( order );

      r3 = 1.0 + 0.012 * alpha *
        ( 1.0 + 0.25 * r8_abs ( alpha ) ) / ( double ) ( order );

      x0 = x0 - r1 * r2 * r3 * ( 1.0 - x0 );
    }
//
//  Initial estimate for the third root.
//
    else if ( i == 3 )
    {
      r1 = ( 1.67 + 0.28 * alpha ) / ( 1.0 + 0.37 * alpha );

      r2 = 1.0 + 0.22 * ( ( double ) ( order ) - 8.0 )
        / ( double ) ( order );

      r3 = 1.0 + 8.0 * alpha /
        ( ( 6.28 + alpha ) * ( double ) ( order * order ) );

      x0 = x0 - r1 * r2 * r3 * ( x[0] - x0 );
    }
//
//  Middle roots: extrapolate from the three previously computed roots.
//
    else if ( i < order - 1 )
    {
      x0 = 3.0 * x[i-2] - 3.0 * x[i-3] + x[i-4];
    }
//
//  Initial estimate for the second-to-last root.
//
    else if ( i == order - 1 )
    {
      r1 = ( 1.0 + 0.235 * alpha ) / ( 0.766 + 0.119 * alpha );

      r2 = 1.0 / ( 1.0 + 0.639
        * ( ( double ) ( order ) - 4.0 )
        / ( 1.0 + 0.71 * ( ( double ) ( order ) - 4.0 ) ) );

      r3 = 1.0 / ( 1.0 + 20.0 * alpha / ( ( 7.5 + alpha ) *
        ( double ) ( order * order ) ) );

      x0 = x0 + r1 * r2 * r3 * ( x0 - x[i-3] );
    }
//
//  Initial estimate for the last root.
//
    else if ( i == order )
    {
      r1 = ( 1.0 + 0.37 * alpha ) / ( 1.67 + 0.28 * alpha );

      r2 = 1.0 /
        ( 1.0 + 0.22 * ( ( double ) ( order ) - 8.0 )
        / ( double ) ( order ) );

      r3 = 1.0 / ( 1.0 + 8.0 * alpha /
        ( ( 6.28 + alpha ) * ( double ) ( order * order ) ) );

      x0 = x0 + r1 * r2 * r3 * ( x0 - x[i-3] );
    }
//
//  Refine the estimate.  NOTE(review): GEGENBAUER_ROOT is defined
//  elsewhere; presumably a Newton-type root polisher returning the
//  polynomial value P1 and derivative DP2 at the root — confirm there.
//
    webbur::gegenbauer_root ( &x0, order, alpha, &dp2, &p1, c );

    x[i-1] = x0;
    w[i-1] = cc / ( dp2 * p1 );
  }
//
//  Reverse the order of the values.
//
  for ( i = 1; i <= order/2; i++ )
  {
    temp       = x[i-1];
    x[i-1]     = x[order-i];
    x[order-i] = temp;
  }

  for ( i = 1; i <=order/2; i++ )
  {
    temp       = w[i-1];
    w[i-1]     = w[order-i];
    w[order-i] = temp;
  }

  delete [] c;

  return;
}
+//****************************************************************************80
+
+void gegenbauer_compute_np ( int order, int np, double p[], double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEGENBAUER_COMPUTE_NP computes a Gegenbauer quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) (1-X^2)^ALPHA * F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
+//
+//    Thanks to Janiki Raman for pointing out a problem in an earlier
+//    version of the code that occurred when ALPHA was -0.5.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], contains parameters.
+//    P[0] = ALPHA = the exponent of (1-X^2).  -1.0 < ALPHA is required.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gegenbauer_compute ( order, alpha, x, w );
+
+  return;
+}
+//****************************************************************************80
+
+void gegenbauer_compute_points ( int order, double alpha, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEGENBAUER_COMPUTE_POINTS computes Gegenbauer quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, double ALPHA, the exponent of (1-X^2).  -1.0 < ALPHA is required.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double *w;
+
+  w = new double[order];
+
+  webbur::gegenbauer_compute ( order, alpha, x, w );
+
+  delete [] w;
+
+  return;
+}
+//****************************************************************************80
+
+void gegenbauer_compute_points_np ( int order, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEGENBAUER_COMPUTE_POINTS_NP computes Gegenbauer quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], contains parameters.
+//    P[0] = ALPHA = the exponent of (1-X^2).  -1.0 < ALPHA is required.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gegenbauer_compute_points ( order, alpha, x );
+
+  return;
+}
+//****************************************************************************80
+
+void gegenbauer_compute_weights ( int order, double alpha, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEGENBAUER_COMPUTE_WEIGHTS computes Gegenbauer quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, double ALPHA, the exponent of (1-X^2).  -1.0 < ALPHA is required.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double *x;
+
+  x = new double[order];
+
+  webbur::gegenbauer_compute ( order, alpha, x, w );
+
+  delete [] x;
+
+  return;
+}
+//****************************************************************************80
+
+void gegenbauer_compute_weights_np ( int order, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEGENBAUER_COMPUTE_WEIGHTS_NP computes Gegenbauer quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, double P[1], contains parameters.
+//    P[0] = ALPHA = the exponent of (1-X^2).  -1.0 < ALPHA is required.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gegenbauer_compute_weights ( order, alpha, w );
+
+  return;
+}
+//****************************************************************************80
+
double gegenbauer_integral ( int expon, double alpha )

//****************************************************************************80
//
//  Purpose:
//
//    GEGENBAUER_INTEGRAL integrates a monomial with Gegenbauer weight.
//
//  Discussion:
//
//    VALUE = Integral ( -1 <= X <= +1 ) x^EXPON (1-x^2)^ALPHA dx
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    26 February 2008
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int EXPON, the exponent.
//
//    Input, double ALPHA, the exponent of (1-X^2) in the weight factor.
//
//    Output, double GEGENBAUER_INTEGRAL, the value of the integral.
//
{
  double arg1;
  double arg2;
  double arg3;
  double arg4;
  double c;
  double value;
  double value1;
//
//  For odd EXPON the integrand is odd over [-1,+1], so the integral is zero.
//
  if ( ( expon % 2 ) == 1 )
  {
    value = 0.0;
    return value;
  }

  c = ( double ) ( expon );
//
//  Even case: express the integral through the Gauss hypergeometric function
//  2F1 ( -ALPHA, 1+C; 2+ALPHA+C; -1 ).
//
  arg1 = - alpha;
  arg2 =   1.0 + c;
  arg3 =   2.0 + alpha + c;
  arg4 = - 1.0;

  value1 = webbur::r8_hyper_2f1 ( arg1, arg2, arg3, arg4 );
//
//  Complete the closed form with the Gamma-function prefactor.
//
  value = webbur::r8_gamma ( 1.0 + c ) * 2.0
    * webbur::r8_gamma ( 1.0 + alpha  ) * value1
    / webbur::r8_gamma ( 2.0 + alpha  + c );

  return value;
}
+//****************************************************************************80
+
void gegenbauer_recur ( double *p2, double *dp2, double *p1, double x,
  int order, double alpha, double c[] )

//****************************************************************************80
//
//  Purpose:
//
//    GEGENBAUER_RECUR evaluates a Gegenbauer polynomial and its derivative.
//
//  Discussion:
//
//    The three-term recurrence
//
//      J(K)(X) = X * J(K-1)(X) - C(K) * J(K-2)(X)
//
//    is advanced from J(0)(X) = 1 and J(1)(X) = X, carrying the derivative
//    along with the value at each step.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//  Parameters:
//
//    Output, double *P2, the value of J(ORDER)(X).
//
//    Output, double *DP2, the value of J'(ORDER)(X).
//
//    Output, double *P1, the value of J(ORDER-1)(X).
//
//    Input, double X, the point at which polynomials are evaluated.
//
//    Input, int ORDER, the order of the polynomial.
//
//    Input, double ALPHA, the exponent of (1-X^2); not referenced directly,
//    since its effect is carried entirely by the coefficients C.
//
//    Input, double C[ORDER], the recursion coefficients.
//
{
  int k;
  double prev;
  double dprev;
  double dcur;
//
//  Initialize with J(0) and J(1).
//
  *p1 = 1.0;
  dcur = 0.0;

  *p2 = x;
  *dp2 = 1.0;
//
//  Advance the recurrence; (prev, dprev) track J(K-2), (p1, dcur) track J(K-1).
//
  for ( k = 2; k <= order; k++ )
  {
    prev = *p1;
    dprev = dcur;

    *p1 = *p2;
    dcur = *dp2;

    *p2 = x *  ( *p1 ) - c[k-1] * prev;
    *dp2 = x * dcur + ( *p1 ) - c[k-1] * dprev;
  }
}
+//****************************************************************************80
+
void gegenbauer_root ( double *x, int order, double alpha, double *dp2,
  double *p1, double c[] )

//****************************************************************************80
//
//  Purpose:
//
//    GEGENBAUER_ROOT improves an approximate root of a Gegenbauer polynomial.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    26 February 2008
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//  Parameters:
//
//    Input/output, double *X, the approximate root, which
//    should be improved on output.
//
//    Input, int ORDER, the order of the polynomial.
//
//    Input, double ALPHA, the exponents of (1-X^2).
//
//    Output, double *DP2, the value of J'(ORDER)(X).
//
//    Output, double *P1, the value of J(ORDER-1)(X).
//
//    Input, double C[ORDER], the recursion coefficients.
//
{
  double d;
  double eps;
  double p2;
  int step;
  int step_max = 10;  // cap on the number of Newton iterations

  eps = webbur::r8_epsilon ( );

  for ( step = 1; step <= step_max; step++ )
  {
//
//  Evaluate the polynomial and its derivative at the current iterate.
//
    webbur::gegenbauer_recur ( &p2, dp2, p1, *x, order, alpha, c );
//
//  Newton correction: X <- X - J(ORDER)(X) / J'(ORDER)(X).
//
    d = p2 / ( *dp2 );
    *x = *x - d;
//
//  Stop once the correction is negligible relative to the root magnitude.
//
    if ( webbur::r8_abs ( d ) <= eps * ( webbur::r8_abs ( *x ) + 1.0 ) )
    {
      return;
    }
  }
  return;
}
+//****************************************************************************80
+
void gen_hermite_compute ( int n, double alpha, double x[], double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    GEN_HERMITE_COMPUTE computes a generalized Gauss-Hermite quadrature rule.
//
//  Discussion:
//
//    The code uses an algorithm by Elhay and Kautsky.
//
//    The integral:
//
//      integral ( -oo < x < +oo ) |x|^alpha exp(-x^2) f(x) dx
//
//    The quadrature rule:
//
//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    30 April 2011
//
//  Author:
//
//    Original FORTRAN77 version by Sylvan Elhay, Jaroslav Kautsky.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Sylvan Elhay, Jaroslav Kautsky,
//    Algorithm 655: IQPACK, FORTRAN Subroutines for the Weights of
//    Interpolatory Quadrature,
//    ACM Transactions on Mathematical Software,
//    Volume 13, Number 4, December 1987, pages 399-415.
//
//  Parameters:
//
//    Input, int N, the number of abscissas.
//
//    Input, double ALPHA, the parameter.
//    -1.0 < ALPHA.
//
//    Output, double X[N], the abscissas.
//
//    Output, double W[N], the weights.
//
{
  double *bj;
  int i;
  double i_r8;
  double zemu;
//
//  Define the zero-th moment:
//  integral ( -oo < x < +oo ) |x|^alpha exp(-x^2) dx = Gamma ( (alpha+1)/2 ).
//
  zemu = webbur::r8_gamma ( ( alpha + 1.0 ) / 2.0 );
//
//  Define the Jacobi matrix.
//  The squared off-diagonal entries alternate between (k+alpha)/2 and k/2,
//  with k = i + 1.
//
  bj = new double[n];

  for ( i = 0; i < n; i++ )
  {
    i_r8 = ( double ) ( i + 1 );
    if ( ( i % 2 ) == 0 )
    {
      bj[i] = ( i_r8 + alpha ) / 2.0;
    }
    else
    {
      bj[i] = i_r8 / 2.0;
    }
  }

  for ( i = 0; i < n; i++ )
  {
    bj[i] = std::sqrt ( bj[i] );
  }
//
//  The diagonal is zero because the weight function is even.
//
  for ( i = 0; i < n; i++ )
  {
    x[i] = 0.0;
  }
//
//  Seed the eigenvector first components with sqrt of the zero-th moment.
//
  w[0] = std::sqrt ( zemu );
  for ( i = 1; i < n; i++ )
  {
    w[i] = 0.0;
  }
//
//  Diagonalize the Jacobi matrix.
//
  webbur::imtqlx ( n, x, bj, w );
//
//  Eigenvalues are the abscissas; squared first eigenvector components
//  are the weights.
//
  for ( i = 0; i < n; i++ )
  {
    w[i] = w[i] * w[i];
  }

  delete [] bj;

  return;
}
+//****************************************************************************80
+
+void gen_hermite_compute_np ( int order, int np, double p[], double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_HERMITE_COMPUTE_NP computes a Generalized Hermite quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -oo < x < +oo ) |x|^ALPHA exp(-x^2) f(x) dx
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], contains parameters.
+//    P[0] = ALPHA, the exponent of the X factor. -1.0 < ALPHA.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gen_hermite_compute ( order, alpha, x, w );
+
+  return;
+}
+//****************************************************************************80
+
+void gen_hermite_compute_points ( int order, double alpha, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_HERMITE_COMPUTE_POINTS computes Generalized Hermite quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, double ALPHA, the exponent of the X factor.
+//    -1.0 < ALPHA.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double *w;
+
+  w = new double[order];
+
+  webbur::gen_hermite_compute ( order, alpha, x, w );
+
+  delete [] w;
+
+  return;
+}
+//****************************************************************************80
+
+void gen_hermite_compute_points_np ( int order, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_HERMITE_COMPUTE_POINTS_NP: Generalized Hermite quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], contains parameters.
+//    P[0] = ALPHA, the exponent of the X factor. -1.0 < ALPHA.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gen_hermite_compute_points ( order, alpha, x );
+
+  return;
+}
+//****************************************************************************80
+
+void gen_hermite_compute_weights ( int order, double alpha, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_HERMITE_COMPUTE_WEIGHTS computes Generalized Hermite quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, double ALPHA, the exponent of the X factor.
+//    -1.0 < ALPHA.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double *x;
+
+  x = new double[order];
+
+  webbur::gen_hermite_compute ( order, alpha, x, w );
+
+  delete [] x;
+
+  return;
+}
+//****************************************************************************80
+
+void gen_hermite_compute_weights_np ( int order, int np, double p[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_HERMITE_COMPUTE_WEIGHTS_NP: Generalized Hermite quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], contains parameters.
+//    P[0] = ALPHA, the exponent of the X factor. -1.0 < ALPHA.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gen_hermite_compute_weights ( order, alpha, w );
+
+  return;
+}
+//****************************************************************************80
+
void gen_hermite_dr_compute ( int order, double alpha, double x[], double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    GEN_HERMITE_DR_COMPUTE computes a Generalized Hermite quadrature rule.
//
//  Discussion:
//
//    The integral:
//
//      Integral ( -oo < x < +oo ) |x|^ALPHA exp(-x^2) f(x) dx
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    13 June 2009
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Philip Davis, Philip Rabinowitz,
//    Methods of Numerical Integration,
//    Second Edition,
//    Dover, 2007,
//    ISBN: 0486453391,
//    LC: QA299.3.D28.
//
//  Parameters:
//
//    Input, int ORDER, the order.
//    1 <= ORDER.
//
//    Input, double ALPHA, the exponent of the X factor.
//    -1.0 < ALPHA.
//
//    Output, double X[ORDER], the abscissas.
//
//    Output, double W[ORDER], the weights.
//
{
  double alpha_laguerre;
  double arg;
  int i;
  int order_laguerre;
  double *w_laguerre;
  double *x_laguerre;

  if ( order < 1 )
  {
    std::cerr << "\n";
    std::cerr << "GEN_HERMITE_DR_COMPUTE - Fatal error!\n";
    std::cerr << "  Illegal value of ORDER = " << order << "\n";
    std::exit ( 1 );
  }
//
//  ORDER = 1: a single node at the origin carries the whole zero-th moment,
//  Gamma ( (ALPHA+1)/2 ).
//
  if ( order == 1 )
  {
    arg = ( alpha + 1.0 ) / 2.0;
    x[0] = 0.0;
    w[0] = webbur::r8_gamma ( arg );
    return;
  }
//
//  Reduce to a Generalized Laguerre rule of half the order via x -> x^2;
//  the Laguerre exponent differs between the even and odd ORDER cases.
//
  if ( ( order % 2 ) == 0 )
  {
    order_laguerre = order / 2;
    alpha_laguerre = ( alpha - 1.0 ) / 2.0;
  }
  else
  {
    order_laguerre = ( order - 1 ) / 2;
    alpha_laguerre = ( alpha + 1.0 ) / 2.0;
  }

  w_laguerre = new double[order_laguerre];
  x_laguerre = new double[order_laguerre];

  webbur::gen_laguerre_ss_compute ( order_laguerre, alpha_laguerre, x_laguerre,
    w_laguerre );
//
//  Even ORDER: nodes are +/- sqrt of the Laguerre nodes, each side taking
//  half of the corresponding Laguerre weight.
//
  if ( ( order % 2 ) == 0 )
  {
    for ( i = 0; i < order_laguerre; i++ )
    {
      x[i] = - std::sqrt ( x_laguerre[order_laguerre-1-i] );
    }
    for ( i = 0; i < order_laguerre; i++ )
    {
      x[order_laguerre+i] = std::sqrt ( x_laguerre[i] );
	}
    for ( i = 0; i < order_laguerre; i++ )
    {
      w[i] = 0.5 * w_laguerre[order_laguerre-1-i];
    }
    for ( i = 0; i < order_laguerre; i++ )
    {
      w[order_laguerre+i] = 0.5 * w_laguerre[i];
    }
  }
//
//  Odd ORDER: same mirroring with weights scaled by 1/x_laguerre, plus a
//  central node at zero whose weight is the residual of the zero-th moment.
//
  else if ( ( order % 2 ) == 1 )
  {
    for ( i = 0; i < order_laguerre; i++ )
    {
      x[i] = - std::sqrt ( x_laguerre[order_laguerre-1-i] );
    }
    x[order_laguerre] = 0.0;
    for ( i = 0; i < order_laguerre; i++ )
    {
      x[order_laguerre+1+i] = std::sqrt ( x_laguerre[i] );
	}
    for ( i = 0; i < order_laguerre; i++ )
    {
      w[i] = 0.5 * w_laguerre[order_laguerre-1-i] / x_laguerre[order_laguerre-1-i];
    }

    arg = ( alpha + 1.0 ) / 2.0;
    w[order_laguerre] = webbur::r8_gamma ( arg );
    for ( i = 0; i < order_laguerre; i++ )
    {
      w[order_laguerre] = w[order_laguerre] - w_laguerre[i] / x_laguerre[i];
    }

    for ( i = 0; i < order_laguerre; i++ )
    {
      w[order_laguerre+1+i] = 0.5 * w_laguerre[i] / x_laguerre[i];
    }
  }
  delete [] w_laguerre;
  delete [] x_laguerre;

  return;
}
+//****************************************************************************80
+
double gen_hermite_integral ( int expon, double alpha )

//****************************************************************************80
//
//  Purpose:
//
//    GEN_HERMITE_INTEGRAL evaluates a monomial Generalized Hermite integral.
//
//  Discussion:
//
//    H(n,alpha) = Integral ( -oo < x < +oo ) x^n |x|^alpha exp(-x^2) dx
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    19 February 2008
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int EXPON, the exponent of the monomial.
//    0 <= EXPON.
//
//    Input, double ALPHA, the exponent of |X| in the weight function.
//    -1.0 < ALPHA.
//
//    Output, double GEN_HERMITE_INTEGRAL, the value of the integral.
//
{
  double a;
  double arg;
  double value;
//
//  Odd monomials integrate to zero by symmetry of the weight function.
//
  if ( ( expon % 2 ) == 1 )
  {
    value = 0.0;
  }
  else
  {
    a = alpha + ( double ) ( expon );
//
//  For A <= -1 the integral diverges; return a huge negative sentinel.
//
    if ( a <= - 1.0 )
    {
      value = - webbur::r8_huge ( );
    }
    else
    {
//
//  Even case: value = Gamma ( (A+1)/2 ).
//
      arg = ( a + 1.0 ) / 2.0;
      value = webbur::r8_gamma ( arg );
    }
  }
  return value;
}
+//****************************************************************************80
+
+void gen_laguerre_compute ( int n, double alpha, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_LAGUERRE_COMPUTE: generalized Gauss-Laguerre quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( 0 <= x < +oo ) exp ( - x ) * x^alpha * f(x) dx
+//
+//    The quadrature rule:
+//
+//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
+//
+//    The integral:
+//
+//      integral ( 0 <= x < +oo ) x^alpha * f(x) dx
+//
+//    The quadrature rule:
+//
+//      sum ( 1 <= i <= n ) w(i) * exp ( x(i) ) * f ( x(i) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Sylvan Elhay, Jaroslav Kautsky.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Sylvan Elhay, Jaroslav Kautsky,
+//    Algorithm 655: IQPACK, FORTRAN Subroutines for the Weights of
+//    Interpolatory Quadrature,
+//    ACM Transactions on Mathematical Software,
+//    Volume 13, Number 4, December 1987, pages 399-415.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, double ALPHA, the exponent of the X factor.
+//    ALPHA must be nonnegative.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double *bj;
+  int i;
+  double i_r8;
+  double zemu;
+//
+//  Define the zero-th moment.
+//
+  zemu = webbur::r8_gamma ( alpha + 1.0 );
+//
+//  Define the Jacobi matrix.
+//
+  bj = new double[n];
+
+  for ( i = 0; i < n; i++ )
+  {
+    i_r8 = ( double ) ( i + 1 );
+    bj[i] = std::sqrt ( i_r8 * ( i_r8 + alpha ) );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    i_r8 = ( double ) ( i + 1 );
+    x[i] = 2.0 * i_r8 - 1.0 + alpha;
+  }
+
+  w[0] = std::sqrt ( zemu );
+
+  for ( i = 1; i < n; i++ )
+  {
+    w[i] = 0.0;
+  }
+//
+//  Diagonalize the Jacobi matrix.
+//
+  imtqlx ( n, x, bj, w );
+
+  for ( i = 0; i < n; i++ )
+  {
+    w[i] = w[i] * w[i];
+  }
+
+  delete [] bj;
+
+  return;
+}
+//****************************************************************************80
+
+void gen_laguerre_compute_np ( int order, int np, double p[], double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_LAGUERRE_COMPUTE_NP computes a Generalized Laguerre quadrature rule.
+//
+//  Discussion:
+//
+//    In the simplest case, ALPHA is 0, and we are approximating the
+//    integral from 0 to +oo of exp(-X) * F(X).  When this is so,
+//    it is easy to modify the rule to approximate the integral from
+//    A to +oo as well.
+//
+//    If ALPHA is nonzero, then there is no simple way to extend the
+//    rule to approximate the integral from A to +oo.  The simplest
+//    procedures would be to approximate the integral from 0 to A.
+//
+//    If the integral to approximate is:
+//
+//        Integral ( A <= X < +oo ) exp ( - X ) * F(X) dX
+//      or
+//        Integral ( 0 <= X < +oo ) exp ( - X ) * X^ALPHA * F(X) dX
+//
+//    then the quadrature rule is:
+//
+//      exp ( - A ) * Sum ( 1 <= I <= ORDER ) W(I) * F ( A+X(I) )
+//    or
+//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
+//
+//
+//    If the integral to approximate is:
+//
+//        Integral ( A <= X < +oo ) F(X) dX
+//      or
+//        Integral ( 0 <= X < +oo ) X^ALPHA * F(X) dX
+//
+//    then the quadrature rule is:
+//
+//      exp ( - A ) * Sum ( 1 <= I <= ORDER )
+//        W(I) * exp(A+X(I)) * F ( A+X(I) )
+//    or
+//      Sum ( 1 <= I <= ORDER ) W(I) * exp(X(I)) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, double P[1], contains parameters.
+//    P[0] = ALPHA, the exponent of the X factor.
+//    Set ALPHA = 0.0 for the simplest rule.
+//    ALPHA must be nonnegative.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gen_laguerre_compute ( order, alpha, x, w );
+
+  return;
+}
+//****************************************************************************80
+
+void gen_laguerre_compute_points ( int order, double alpha, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_LAGUERRE_COMPUTE_POINTS: Generalized Laguerre quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 March 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, double ALPHA, the exponent of the X factor.
+//    Set ALPHA = 0.0 for the simplest rule.
+//    ALPHA must be nonnegative.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double *w;
+
+  w = new double[order];
+
+  webbur::gen_laguerre_compute ( order, alpha, x, w );
+
+  delete [] w;
+
+  return;
+}
+//****************************************************************************80
+
+void gen_laguerre_compute_points_np ( int order, int np, double p[],
+  double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_LAGUERRE_COMPUTE_POINTS_NP: Generalized Laguerre quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], contains parameters.
+//    P[0] = ALPHA, the exponent of the X factor.
+//    Set ALPHA = 0.0 for the simplest rule.
+//    ALPHA must be nonnegative.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gen_laguerre_compute_points ( order, alpha, x );
+
+  return;
+}
+//****************************************************************************80
+
+void gen_laguerre_compute_weights ( int order, double alpha, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_LAGUERRE_COMPUTE_WEIGHTS: Generalized Laguerre quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, double ALPHA, the exponent of the X factor.
+//    Set ALPHA = 0.0 for the simplest rule.
+//    ALPHA must be nonnegative.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double *x;
+
+  x = new double[order];
+
+  webbur::gen_laguerre_compute ( order, alpha, x, w );
+
+  delete [] x;
+
+  return;
+}
+//****************************************************************************80
+
+void gen_laguerre_compute_weights_np ( int order, int np, double p[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_LAGUERRE_COMPUTE_WEIGHTS_NP: Generalized Laguerre quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], contains parameters.
+//    P[0] = ALPHA, the exponent of the X factor.
+//    Set ALPHA = 0.0 for the simplest rule.
+//    ALPHA must be nonnegative.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double alpha;
+
+  alpha = p[0];
+
+  webbur::gen_laguerre_compute_weights ( order, alpha, w );
+
+  return;
+}
+//****************************************************************************80
+
double gen_laguerre_integral ( int expon, double alpha )

//****************************************************************************80
//
//  Purpose:
//
//    GEN_LAGUERRE_INTEGRAL evaluates a monomial Generalized Laguerre integral.
//
//  Discussion:
//
//    L(n,alpha) = Integral ( 0 <= x < +oo ) x^n * x^alpha exp(-x) dx
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    20 February 2008
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int EXPON, the exponent of the monomial.
//    0 <= EXPON.
//
//    Input, double ALPHA, the exponent of X in the weight function.
//    -1.0 < ALPHA.
//
//    Output, double GEN_LAGUERRE_INTEGRAL, the value of the integral.
//
{
  double arg;
  double value;
//
//  Closed form: value = Gamma ( ALPHA + EXPON + 1 ).
//
  arg = alpha + ( double ) ( expon + 1.0 );
  value = webbur::r8_gamma ( arg );

  return value;
}
+//****************************************************************************80
+
void gen_laguerre_ss_compute ( int order, double alpha, double x[], double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    GEN_LAGUERRE_SS_COMPUTE computes a Generalized Laguerre quadrature rule.
//
//  Discussion:
//
//    In the simplest case, ALPHA is 0, and we are approximating the
//    integral from 0 to +oo of exp(-X) * F(X).  When this is so,
//    it is easy to modify the rule to approximate the integral from
//    A to +oo as well.
//
//    If ALPHA is nonzero, then there is no simple way to extend the
//    rule to approximate the integral from A to +oo.  The simplest
//    procedures would be to approximate the integral from 0 to A.
//
//    If the integral to approximate is:
//
//        Integral ( A <= X < +oo ) exp ( - X ) * F(X) dX
//      or
//        Integral ( 0 <= X < +oo ) exp ( - X ) * X^ALPHA * F(X) dX
//
//    then the quadrature rule is:
//
//      exp ( - A ) * Sum ( 1 <= I <= ORDER ) W(I) * F ( A+X(I) )
//    or
//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
//
//
//    If the integral to approximate is:
//
//        Integral ( A <= X < +oo ) F(X) dX
//      or
//        Integral ( 0 <= X < +oo ) X^ALPHA * F(X) dX
//
//    then the quadrature rule is:
//
//      exp ( - A ) * Sum ( 1 <= I <= ORDER )
//        W(I) * exp(A+X(I)) * F ( A+X(I) )
//    or
//      Sum ( 1 <= I <= ORDER ) W(I) * exp(X(I)) * F ( X(I) )
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    13 June 2009
//
//  Author:
//
//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//  Parameters:
//
//    Input, int ORDER, the order.
//    1 <= ORDER.
//
//    Input, double ALPHA, the exponent of the X factor.
//    Set ALPHA = 0.0 for the simplest rule.
//    ALPHA must be nonnegative.
//
//    Output, double X[ORDER], the abscissas.
//
//    Output, double W[ORDER], the weights.
//
{
  double *b;
  double *c;
  double cc;
  double dp2;
  int i;
  double p1;
  double prod;
  double r1;
  double r2;
  double ratio;
  double x0;
//
//  Check the input.
//
  if ( order < 1 )
  {
    std::cerr << "\n";
    std::cerr << "GEN_LAGUERRE_SS_COMPUTE - Fatal error!\n";
    std::cerr << "  Illegal value of ORDER = " << order << "\n";
    std::exit ( 1 );
  }

  b = new double[order];
  c = new double[order];
//
//  Set the recursion coefficients.
//  B and C are consumed by GEN_LAGUERRE_SS_ROOT / GEN_LAGUERRE_SS_RECUR.
//
  for ( i = 0; i < order; i++ )
  {
    b[i] = ( alpha + ( double ) ( 2 * i + 1 ) );
  }

  for ( i = 0; i < order; i++ )
  {
    c[i] = ( double ) ( i ) * ( alpha + ( double ) ( i ) );
  }
  prod = 1.0;
  for ( i = 1; i < order; i++ )
  {
    prod = prod * c[i];
  }
//
//  CC = Gamma ( ALPHA + 1 ) * C[1] * ... * C[ORDER-1]; it is the common
//  numerator of every weight computed below.
//
  cc = webbur::r8_gamma ( alpha + 1.0 ) * prod;

  for ( i = 0; i < order; i++ )
  {
//
//  Compute an estimate for the root.
//  X0 deliberately carries its value over from the previous pass:
//  each estimate is a correction of the previously found root.
//
    if ( i == 0 )
    {
      x0 = ( 1.0 + alpha ) * ( 3.0+ 0.92 * alpha ) /
        ( 1.0 + 2.4 * ( double ) ( order ) + 1.8 * alpha );
    }
    else if ( i == 1 )
    {
      x0 = x0 + ( 15.0 + 6.25 * alpha ) /
        ( 1.0 + 0.9 * alpha + 2.5 * ( double ) ( order ) );
    }
    else
    {
      r1 = ( 1.0 + 2.55 * ( double ) ( i - 1 ) )
        / ( 1.9 * ( double ) ( i - 1 ) );

      r2 = 1.26 * ( double ) ( i - 1 ) * alpha /
        ( 1.0 + 3.5 * ( double ) ( i - 1 ) );

      ratio = ( r1 + r2 ) / ( 1.0 + 0.3 * alpha );

      x0 = x0 + ratio * ( x0 - x[i-2] );
    }
//
//  Use iteration to find the root.
//
    webbur::gen_laguerre_ss_root ( &x0, order, alpha, &dp2, &p1, b, c );
//
//  Set the abscissa and weight.
//  DP2 and P1 are the derivative and next-lower polynomial values
//  returned by GEN_LAGUERRE_SS_ROOT at the converged root.
//
    x[i] = x0;
    w[i] = ( cc / dp2 ) / p1;
  }

  delete [] b;
  delete [] c;

  return;
}
+//****************************************************************************80
+
void gen_laguerre_ss_recur ( double *p2, double *dp2, double *p1, double x,
  int order, double alpha, double b[], double c[] )

//****************************************************************************80
//
//  Purpose:
//
//    GEN_LAGUERRE_SS_RECUR evaluates a Generalized Laguerre polynomial.
//
//  Discussion:
//
//    The value and derivative of the degree-ORDER polynomial are built up
//    by running the three-term recurrence, keeping a two-deep history of
//    previous values so the derivative can be accumulated alongside.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//  Parameters:
//
//    Output, double *P2, the value of L(ORDER)(X).
//
//    Output, double *DP2, the value of L'(ORDER)(X).
//
//    Output, double *P1, the value of L(ORDER-1)(X).
//
//    Input, double X, the point at which polynomials are evaluated.
//
//    Input, int ORDER, the order of the polynomial.
//
//    Input, double ALPHA, the exponent of the X factor in the integrand.
//
//    Input, double B[ORDER], C[ORDER], the recursion coefficients.
//
{
  double prev_p;                        // L(j-2)(X) during step j
  double prev_dp;                       // L'(j-2)(X) during step j
  double cur_dp;                        // L'(j-1)(X) during step j
  int j;

//
//  Degree 0 and degree 1 starting values.
//
  *p1 = 1.0;
  cur_dp = 0.0;

  *p2 = x - alpha - 1.0;
  *dp2 = 1.0;
//
//  Advance the recurrence up to degree ORDER.
//
  for ( j = 1; j < order; j++ )
  {
    prev_p = *p1;
    prev_dp = cur_dp;

    *p1 = *p2;
    cur_dp = *dp2;

    *p2  = ( x - b[j] ) * ( *p1 ) - c[j] * prev_p;
    *dp2 = ( x - b[j] ) * cur_dp + ( *p1 ) - c[j] * prev_dp;
  }

  return;
}
+//****************************************************************************80
+
+void gen_laguerre_ss_root ( double *x, int order, double alpha, double *dp2,
+  double *p1, double b[], double c[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    GEN_LAGUERRE_SS_ROOT improves a root of a Generalized Laguerre polynomial.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 February 2008
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input/output, double *X, the approximate root, which
+//    should be improved on output.
+//
+//    Input, int ORDER, the order of the polynomial.
+//
+//    Input, double ALPHA, the exponent of the X factor.
+//
+//    Output, double *DP2, the value of L'(ORDER)(X).
+//
+//    Output, double *P1, the value of L(ORDER-1)(X).
+//
+//    Input, double B[ORDER], C[ORDER], the recursion coefficients.
+//
+{
+  double d;
+  double eps;
+  double p2;
+  int step;
+  int step_max = 10;
+
+  eps = webbur::r8_epsilon ( );
+
+  for ( step = 1; step <= step_max; step++ )
+  {
+    webbur::gen_laguerre_ss_recur ( &p2, dp2, p1, *x, order, alpha, b, c );
+
+    d = p2 / ( *dp2 );
+    *x = *x - d;
+
+    if ( webbur::r8_abs ( d ) <= eps * ( webbur::r8_abs ( *x ) + 1.0 ) )
+    {
+      break;
+    }
+  }
+  return;
+}
+//****************************************************************************80
+
void hc_compute_weights_from_points ( int nhalf, double xhalf[], double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    HC_COMPUTE_WEIGHTS_FROM_POINTS: Hermite-Cubic weights, user-supplied points.
//
//  Discussion:
//
//    An interval [A,B] has been divided by NHALF points X; at each
//    point both function and derivative information is available.
//
//    The piecewise cubic Hermite interpolant is constructed for this data,
//    and a quadrature rule is determined for the interpolant.
//
//    There will be N=2*NHALF weights: for each point, the first weight of
//    the pair multiplies the function value, the second the derivative.
//
//    NOTE(review): the code reads XHALF[1] and XHALF[NHALF-2], so it
//    assumes 2 <= NHALF — confirm callers guarantee this.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int NHALF, the number of points, not counting repetitions.
//
//    Input, double XHALF[NHALF], the points, without repetition.
//
//    Output, double W[2*NHALF], the weights.  The first two weights are
//    associated with the first point, and so on.
//
{
  double h;
  int k;
//
//  First point.
//
  h = xhalf[1] - xhalf[0];
  w[0] = 0.5 * h;
  w[1] = std::pow ( h, 2 ) / 12.0;
//
//  Interior points.
//
  for ( k = 1; k < nhalf - 1; k++ )
  {
    double span = xhalf[k+1] - xhalf[k-1];
    w[0+k*2] = 0.5 * span;
    w[1+k*2] = span * ( xhalf[k+1] - 2.0 * xhalf[k] + xhalf[k-1] ) / 12.0;
  }
//
//  Last point.
//
  h = xhalf[nhalf-2] - xhalf[nhalf-1];
  w[0+(nhalf-1)*2] = -0.5 * h;
  w[1+(nhalf-1)*2] = -std::pow ( h, 2 ) / 12.0;

  return;
}
+//****************************************************************************80
+
+void hcc_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCC_COMPUTE computes a Hermite-Cubic-Chebyshev-Spacing quadrature rule.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into Chebyshev-spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    24 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  int nhalf;
+  double *xhalf;
+
+  nhalf = n / 2;
+  xhalf = new double[nhalf];
+
+  webbur::clenshaw_curtis_compute_points ( nhalf, xhalf );
+  webbur::r8vec_stutter ( nhalf, xhalf, 2, x );
+  webbur::hc_compute_weights_from_points ( nhalf, xhalf, w );
+
+  delete [] xhalf;
+
+  return;
+}
+//****************************************************************************80
+
+void hcc_compute_np ( int n, int np, double p[], double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCC_COMPUTE_NP computes a Hermite-Cubic-Chebyshev-Spacing quadrature rule.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into Chebyshev-spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    17 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::hcc_compute ( n, x, w );
+
+  return;
+}
+//****************************************************************************80
+
+void hcc_compute_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCC_COMPUTE_POINTS computes Hermite-Cubic-Chebyshev-Spacing quadrature points.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into Chebyshev-spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    24 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  int nhalf;
+  double *xhalf;
+
+  if ( ( n % 2 ) != 0 )
+  {
+    std::cerr << "\n";
+    std::cerr << "HCC_COMPUTE_POINTS - Fatal error!\n";
+    std::cerr << "  Order of rule N is not even.\n";
+    std::exit ( 1 );
+  }
+
+  nhalf = n / 2;
+  xhalf = new double[nhalf];
+
+  webbur::clenshaw_curtis_compute_points ( nhalf, xhalf );
+  webbur::r8vec_stutter ( nhalf, xhalf, 2, x );
+
+  delete [] xhalf;
+
+  return;
+}
+//****************************************************************************80
+
+void hcc_compute_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCC_COMPUTE_POINTS_NP: Hermite-Cubic-Chebyshev-Spacing quadrature points.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into Chebyshev-spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    17 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::hcc_compute_points ( n, x );
+
+  return;
+}
+//****************************************************************************80
+
+void hcc_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCC_COMPUTE_WEIGHTS: Hermite-Cubic-Chebyshev-Spacing quadrature weights.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into Chebyshev-spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    24 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double W[N], the weights.
+//
+{
+  int nhalf;
+  double *xhalf;
+
+  if ( ( n % 2 ) != 0 )
+  {
+    std::cerr << "\n";
+    std::cerr << "HCC_COMPUTE_WEIGHTS - Fatal error!\n";
+    std::cerr << "  Order of rule N is not even.\n";
+    std::exit ( 1 );
+  }
+
+  nhalf = n / 2;
+  xhalf = new double[nhalf];
+
+  webbur::hc_compute_weights_from_points ( nhalf, xhalf, w );
+
+  delete [] xhalf;
+
+  return;
+}
+//****************************************************************************80
+
+void hcc_compute_weights_np ( int n, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCC_COMPUTE_WEIGHTS_NP: Hermite-Cubic-Chebyshev-Spacing quadrature weights.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into Chebyshev-spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    17 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::hcc_compute_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
+void hce_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCE_COMPUTE computes a Hermite-Cubic-Equal-Spacing quadrature rule.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into equally spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    28 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double a_high = 1.0;
+  double a_low = 0.0;
+  int nhalf;
+  double *xhalf;
+
+  a_low = 0.0;
+  a_high = 1.0;
+
+  nhalf = n / 2;
+
+  xhalf = webbur::r8vec_linspace_new ( nhalf, a_low, a_high );
+  webbur::r8vec_stutter ( nhalf, xhalf, 2, x );
+  webbur::hc_compute_weights_from_points ( nhalf, xhalf, w );
+
+  delete [] xhalf;
+
+  return;
+}
+//****************************************************************************80
+
+void hce_compute_np ( int n, int np, double p[], double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCE_COMPUTE_NP computes a Hermite-Cubic-Equal-Spacing quadrature rule.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into equally spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::hce_compute ( n, x, w );
+
+  return;
+}
+//****************************************************************************80
+
void hce_compute_points ( int n, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    HCE_COMPUTE_POINTS computes Hermite-Cubic-Equal-Spacing quadrature points.
//
//  Discussion:
//
//    The rule uses N = 2 * M abscissas: the M underlying nodes are equally
//    spaced, and each node is listed twice, since both the function value
//    and the derivative are used there.
//
//    NOTE(review): for M == 1 (N == 2) the divisor M - 1 is zero and the
//    node value is 0/0; confirm that callers only use N >= 4.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order.  N must be even.
//
//    Output, double X[N], the abscissas.
//
{
  int j;
  int m;
  double xj;

  if ( ( n % 2 ) != 0 )
  {
    std::cerr << "\n";
    std::cerr << "HCE_COMPUTE_POINTS - Fatal error!\n";
    std::cerr << "  Order of rule N is not even.\n";
    std::exit ( 1 );
  }
  m = n / 2;
//
//  Node J maps to (2J+1-M)/(M-1); store it in both slots of the pair.
//
  for ( j = 0; j < m; j++ )
  {
    xj = ( double ) ( 2 * j + 1 - m ) / ( double ) ( m - 1 );
    x[0+j*2] = xj;
    x[1+j*2] = xj;
  }
  return;
}
+//****************************************************************************80
+
+void hce_compute_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCE_COMPUTE_POINTS_NP: Hermite-Cubic-Equal-Spacing quadrature points.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into equally spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::hce_compute_points ( n, x );
+
+  return;
+}
+//****************************************************************************80
+
+void hce_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCE_COMPUTE_WEIGHTS: Hermite-Cubic-Equal-Spacing quadrature weights.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into equally spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    24 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double W[N], the weights.
+//
+{
+  int nhalf;
+  double *xhalf;
+
+  if ( ( n % 2 ) != 0 )
+  {
+    std::cerr << "\n";
+    std::cerr << "HCE_COMPUTE_WEIGHTS - Fatal error!\n";
+    std::cerr << "  Order of rule N is not even.\n";
+    std::exit ( 1 );
+  }
+
+  nhalf = n / 2;
+  xhalf = new double[nhalf];
+
+  webbur::hc_compute_weights_from_points ( nhalf, xhalf, w );
+
+  delete [] xhalf;
+
+  return;
+}
+//****************************************************************************80
+
+void hce_compute_weights_np ( int n, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HCE_COMPUTE_WEIGHTS_NP: Hermite-Cubic-Equal-Spacing quadrature weights.
+//
+//  Discussion:
+//
+//    For the HCE rule, we assume that an interval has been divided by
+//    M nodes X into equally spaced subintervals, and that at each
+//    abscissa both function and derivative information is available.
+//    The piecewise cubic Hermite interpolant is constructed for this data.
+//    The quadrature rule uses N = 2 * M abscissas, where each node is
+//    listed twice, and the weights occur in pairs, with the first multiplying
+//    the function value and the second the derivative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::hce_compute_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
+void hermite_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_COMPUTE computes a Gauss-Hermite quadrature rule.
+//
+//  Discussion:
+//
+//    The code uses an algorithm by Elhay and Kautsky.
+//
+//    The abscissas are the zeros of the N-th order Hermite polynomial.
+//
+//    The integral:
+//
+//      integral ( -oo < x < +oo ) exp ( - x * x ) * f(x) dx
+//
+//    The quadrature rule:
+//
+//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Sylvan Elhay, Jaroslav Kautsky.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Sylvan Elhay, Jaroslav Kautsky,
+//    Algorithm 655: IQPACK, FORTRAN Subroutines for the Weights of
+//    Interpolatory Quadrature,
+//    ACM Transactions on Mathematical Software,
+//    Volume 13, Number 4, December 1987, pages 399-415.
+//
+//  Parameters:
+//
+//    Input, int N, the number of abscissas.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double arg;
+  double *bj;
+  int i;
+  double zemu;
+//
+//  Define the zero-th moment.
+//
+  arg = 0.5;
+  zemu = webbur::r8_gamma ( arg );
+//
+//  Define the Jacobi matrix.
+//
+  bj = new double[n];
+
+  for ( i = 0; i < n; i++ )
+  {
+    bj[i] = std::sqrt ( ( double ) ( i + 1 ) / 2.0 );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    x[i] = 0.0;
+  }
+
+  w[0] = std::sqrt ( zemu );
+  for ( i = 1; i < n; i++ )
+  {
+    w[i] = 0.0;
+  }
+//
+//  Diagonalize the Jacobi matrix.
+//
+  webbur::imtqlx ( n, x, bj, w );
+
+  for ( i = 0; i < n; i++ )
+  {
+    w[i] = w[i] * w[i];
+  }
+
+  delete [] bj;
+
+  return;
+}
+//****************************************************************************80
+
+void hermite_compute_np ( int order, int np, double p[], double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_COMPUTE_NP computes a Hermite quadrature rule.
+//
+//  Discussion:
+//
+//    The abscissas are the zeros of the N-th order Hermite polynomial.
+//
+//    The integral:
+//
+//      Integral ( -oo < X < +oo ) exp ( - X * X ) * F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  webbur::hermite_compute ( order, x, w );
+
+  return;
+}
+//****************************************************************************80
+
+void hermite_compute_points ( int order, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_COMPUTE_POINTS computes Hermite quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double *w;
+
+  w = new double[order];
+
+  webbur::hermite_compute ( order, x, w );
+
+  delete [] w;
+
+  return;
+}
+//****************************************************************************80
+
+void hermite_compute_points_np ( int order, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_COMPUTE_POINTS_NP computes Hermite quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  webbur::hermite_compute_points ( order, x );
+
+  return;
+}
+//****************************************************************************80
+
+void hermite_compute_weights ( int order, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_COMPUTE_WEIGHTS computes Hermite quadrature weights.
+//
+//  Discussion:
+//
+//    The underlying routine HERMITE_COMPUTE produces the points and the
+//    weights together, so a scratch array receives the points, which are
+//    then discarded.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order of the rule.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double *x_scratch = new double[order];
+
+  webbur::hermite_compute ( order, x_scratch, w );
+
+  delete [] x_scratch;
+}
+//****************************************************************************80
+
+void hermite_compute_weights_np ( int order, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_COMPUTE_WEIGHTS_NP computes Hermite quadrature weights.
+//
+//  Discussion:
+//
+//    This is a thin adapter exposing the generic parameterized "_np"
+//    interface: NP and P are accepted for uniformity with other rules
+//    but are ignored, and the work is delegated to
+//    HERMITE_COMPUTE_WEIGHTS.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order of the rule.
+//
+//    Input, int NP, the number of parameters (ignored).
+//
+//    Input, double P[NP], parameters, not needed by this function.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  webbur::hermite_compute_weights ( order, w );
+}
+//****************************************************************************80
+
+void hermite_genz_keister_lookup ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_GENZ_KEISTER_LOOKUP looks up a Genz-Keister Hermite rule.
+//
+//  Discussion:
+//
+//    The rule approximates
+//
+//      integral ( -oo <= x <= +oo ) f(x) exp ( - x * x ) dx
+//
+//    by the weighted sum
+//
+//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
+//
+//    Genz and Keister produced a nested family of rules for this
+//    integration problem, denoted 1+2+6+10+16, comprising rules of
+//    successive orders O = 1, 3, 9, 19 and 35, with corresponding
+//    precisions P = 1, 5, 15, 29 and 51.
+//
+//    This routine simply delegates to the separate point and weight
+//    lookup routines.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 June 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Alan Genz, Bradley Keister,
+//    Fully symmetric interpolatory rules for multiple integrals
+//    over infinite regions with Gaussian weight,
+//    Journal of Computational and Applied Mathematics,
+//    Volume 71, 1996, pages 299-309
+//
+//    Florian Heiss, Viktor Winschel,
+//    Likelihood approximation by numerical integration on sparse grids,
+//    Journal of Econometrics,
+//    Volume 144, 2008, pages 62-80.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be 1, 3, 9, 19, 35, 37, 41 or 43.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::hermite_genz_keister_lookup_points ( n, x );
+  webbur::hermite_genz_keister_lookup_weights ( n, w );
+}
+//****************************************************************************80
+
+void hermite_genz_keister_lookup_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_GENZ_KEISTER_LOOKUP_POINTS looks up Genz-Keister Hermite abscissas.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( -oo <= x <= +oo ) f(x) exp ( - x * x ) dx
+//
+//    The quadrature rule:
+//
+//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
+//
+//    A nested family of rules for the Hermite integration problem
+//    was produced by Genz and Keister.  The structure of the nested
+//    family was denoted by 1+2+6+10+?, that is, it comprised rules
+//    of successive orders O = 1, 3, 9, 19, and a final rule of order
+//    35, 37, 41 or 43.
+//
+//    The precisions of these rules are P = 1, 5, 15, 29,
+//    with the final rule of precision 51, 55, 63 or 67.
+//
+//    Three related families begin the same way, but end with a different final
+//    rule.  As a convenience, this function includes these final rules as well:
+//
+//    Designation  Orders       Precisions
+//
+//    1+2+6+10+16  1,3,9,19,35  1,5,15,29,51
+//    1+2+6+10+18  1,3,9,19,37  1,5,15,29,55
+//    1+2+6+10+22  1,3,9,19,41  1,5,15,29,63
+//    1+2+6+10+24  1,3,9,19,43  1,5,15,29,67
+//
+//    Some of the data in this function was kindly supplied directly by
+//    Alan Genz on 24 April 2011.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    04 October 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Alan Genz, Bradley Keister,
+//    Fully symmetric interpolatory rules for multiple integrals
+//    over infinite regions with Gaussian weight,
+//    Journal of Computational and Applied Mathematics,
+//    Volume 71, 1996, pages 299-309
+//
+//    Florian Heiss, Viktor Winschel,
+//    Likelihood approximation by numerical integration on sparse grids,
+//    Journal of Econometrics,
+//    Volume 144, 2008, pages 62-80.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be 1, 3, 9, 19, 35, 37, 41, or 43.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+//
+//  Each branch tabulates the N abscissas of one supported rule, listed in
+//  increasing order; the nodes are symmetric about 0.
+//
+  if ( n == 1 )
+  {
+    x[ 0] =   0.0000000000000000E+00;
+  }
+  else if ( n == 3 )
+  {
+    x[ 0] =  -1.2247448713915889E+00;
+    x[ 1] =   0.0000000000000000E+00;
+    x[ 2] =   1.2247448713915889E+00;
+  }
+  else if ( n == 9 )
+  {
+    x[ 0] =  -2.9592107790638380E+00;
+    x[ 1] =  -2.0232301911005157E+00;
+    x[ 2] =  -1.2247448713915889E+00;
+    x[ 3] =  -5.2403354748695763E-01;
+    x[ 4] =   0.0000000000000000E+00;
+    x[ 5] =   5.2403354748695763E-01;
+    x[ 6] =   1.2247448713915889E+00;
+    x[ 7] =   2.0232301911005157E+00;
+    x[ 8] =   2.9592107790638380E+00;
+  }
+  else if ( n == 19 )
+  {
+    x[ 0] =  -4.4995993983103881E+00;
+    x[ 1] =  -3.6677742159463378E+00;
+    x[ 2] =  -2.9592107790638380E+00;
+    x[ 3] =  -2.2665132620567876E+00;
+    x[ 4] =  -2.0232301911005157E+00;
+    x[ 5] =  -1.8357079751751868E+00;
+    x[ 6] =  -1.2247448713915889E+00;
+    x[ 7] =  -8.7004089535290285E-01;
+    x[ 8] =  -5.2403354748695763E-01;
+    x[ 9] =   0.0000000000000000E+00;
+    x[10] =   5.2403354748695763E-01;
+    x[11] =   8.7004089535290285E-01;
+    x[12] =   1.2247448713915889E+00;
+    x[13] =   1.8357079751751868E+00;
+    x[14] =   2.0232301911005157E+00;
+    x[15] =   2.2665132620567876E+00;
+    x[16] =   2.9592107790638380E+00;
+    x[17] =   3.6677742159463378E+00;
+    x[18] =   4.4995993983103881E+00;
+  }
+  else if ( n == 35 )
+  {
+    x[ 0] =  -6.3759392709822356E+00;
+    x[ 1] =  -5.6432578578857449E+00;
+    x[ 2] =  -5.0360899444730940E+00;
+    x[ 3] =  -4.4995993983103881E+00;
+    x[ 4] =  -4.0292201405043713E+00;
+    x[ 5] =  -3.6677742159463378E+00;
+    x[ 6] =  -3.3491639537131945E+00;
+    x[ 7] =  -2.9592107790638380E+00;
+    x[ 8] =  -2.5705583765842968E+00;
+    x[ 9] =  -2.2665132620567876E+00;
+    x[10] =  -2.0232301911005157E+00;
+    x[11] =  -1.8357079751751868E+00;
+    x[12] =  -1.5794121348467671E+00;
+    x[13] =  -1.2247448713915889E+00;
+    x[14] =  -8.7004089535290285E-01;
+    x[15] =  -5.2403354748695763E-01;
+    x[16] =  -1.7606414208200893E-01;
+    x[17] =   0.0000000000000000E+00;
+    x[18] =   1.7606414208200893E-01;
+    x[19] =   5.2403354748695763E-01;
+    x[20] =   8.7004089535290285E-01;
+    x[21] =   1.2247448713915889E+00;
+    x[22] =   1.5794121348467671E+00;
+    x[23] =   1.8357079751751868E+00;
+    x[24] =   2.0232301911005157E+00;
+    x[25] =   2.2665132620567876E+00;
+    x[26] =   2.5705583765842968E+00;
+    x[27] =   2.9592107790638380E+00;
+    x[28] =   3.3491639537131945E+00;
+    x[29] =   3.6677742159463378E+00;
+    x[30] =   4.0292201405043713E+00;
+    x[31] =   4.4995993983103881E+00;
+    x[32] =   5.0360899444730940E+00;
+    x[33] =   5.6432578578857449E+00;
+    x[34] =   6.3759392709822356E+00;
+  }
+  else if ( n == 37 )
+  {
+    x[ 0] =  -6.853200069757519;
+    x[ 1] =  -6.124527854622158;
+    x[ 2] =  -5.521865209868350;
+    x[ 3] =  -4.986551454150765;
+    x[ 4] =  -4.499599398310388;
+    x[ 5] =  -4.057956316089741;
+    x[ 6] =  -3.667774215946338;
+    x[ 7] =  -3.315584617593290;
+    x[ 8] =  -2.959210779063838;
+    x[ 9] =  -2.597288631188366;
+    x[10] =  -2.266513262056788;
+    x[11] =  -2.023230191100516;
+    x[12] =  -1.835707975175187;
+    x[13] =  -1.561553427651873;
+    x[14] =  -1.224744871391589;
+    x[15] =  -0.870040895352903;
+    x[16] =  -0.524033547486958;
+    x[17] =  -0.214618180588171;
+    x[18] =   0.000000000000000;
+    x[19] =   0.214618180588171;
+    x[20] =   0.524033547486958;
+    x[21] =   0.870040895352903;
+    x[22] =   1.224744871391589;
+    x[23] =   1.561553427651873;
+    x[24] =   1.835707975175187;
+    x[25] =   2.023230191100516;
+    x[26] =   2.266513262056788;
+    x[27] =   2.597288631188366;
+    x[28] =   2.959210779063838;
+    x[29] =   3.315584617593290;
+    x[30] =   3.667774215946338;
+    x[31] =   4.057956316089741;
+    x[32] =   4.499599398310388;
+    x[33] =   4.986551454150765;
+    x[34] =   5.521865209868350;
+    x[35] =   6.124527854622158;
+    x[36] =   6.853200069757519;
+  }
+  else if ( n == 41 )
+  {
+    x[ 0] =  -7.251792998192644;
+    x[ 1] =  -6.547083258397540;
+    x[ 2] =  -5.961461043404500;
+    x[ 3] =  -5.437443360177798;
+    x[ 4] =  -4.953574342912980;
+    x[ 5] =  -4.4995993983103881;
+    x[ 6] =  -4.070919267883068;
+    x[ 7] =  -3.6677742159463378;
+    x[ 8] =  -3.296114596212218;
+    x[ 9] =  -2.9592107790638380;
+    x[10] =  -2.630415236459871;
+    x[11] =  -2.2665132620567876;
+    x[12] =  -2.043834754429505;
+    x[13] =  -2.0232301911005157;
+    x[14] =  -1.8357079751751868;
+    x[15] =  -1.585873011819188;
+    x[16] =  -1.2247448713915889;
+    x[17] =  -0.87004089535290285;
+    x[18] =  -0.52403354748695763;
+    x[19] =  -0.195324784415805;
+    x[20] =   0.0000000000000000;
+    x[21] =   0.195324784415805;
+    x[22] =   0.52403354748695763;
+    x[23] =   0.87004089535290285;
+    x[24] =   1.2247448713915889;
+    x[25] =   1.585873011819188;
+    x[26] =   1.8357079751751868;
+    x[27] =   2.0232301911005157;
+    x[28] =   2.043834754429505;
+    x[29] =   2.2665132620567876;
+    x[30] =   2.630415236459871;
+    x[31] =   2.9592107790638380;
+    x[32] =   3.296114596212218;
+    x[33] =   3.6677742159463378;
+    x[34] =   4.070919267883068;
+    x[35] =   4.4995993983103881;
+    x[36] =   4.953574342912980;
+    x[37] =   5.437443360177798;
+    x[38] =   5.961461043404500;
+    x[39] =   6.547083258397540;
+    x[40] =   7.251792998192644;
+  }
+  else if ( n == 43 )
+  {
+    x[ 0] = -10.167574994881873;
+    x[ 1] =  -7.231746029072501;
+    x[ 2] =  -6.535398426382995;
+    x[ 3] =  -5.954781975039809;
+    x[ 4] =  -5.434053000365068;
+    x[ 5] =  -4.952329763008589;
+    x[ 6] =  -4.4995993983103881;
+    x[ 7] =  -4.071335874253583;
+    x[ 8] =  -3.6677742159463378;
+    x[ 9] =  -3.295265921534226;
+    x[10] =  -2.9592107790638380;
+    x[11] =  -2.633356763661946;
+    x[12] =  -2.2665132620567876;
+    x[13] =  -2.089340389294661;
+    x[14] =  -2.0232301911005157;
+    x[15] =  -1.8357079751751868;
+    x[16] =  -1.583643465293944;
+    x[17] =  -1.2247448713915889;
+    x[18] =  -0.87004089535290285;
+    x[19] =  -0.52403354748695763;
+    x[20] =  -0.196029453662011;
+    x[21] =   0.0000000000000000;
+    x[22] =   0.196029453662011;
+    x[23] =   0.52403354748695763;
+    x[24] =   0.87004089535290285;
+    x[25] =   1.2247448713915889;
+    x[26] =   1.583643465293944;
+    x[27] =   1.8357079751751868;
+    x[28] =   2.0232301911005157;
+    x[29] =   2.089340389294661;
+    x[30] =   2.2665132620567876;
+    x[31] =   2.633356763661946;
+    x[32] =   2.9592107790638380;
+    x[33] =   3.295265921534226;
+    x[34] =   3.6677742159463378;
+    x[35] =   4.071335874253583;
+    x[36] =   4.4995993983103881;
+    x[37] =   4.952329763008589;
+    x[38] =   5.434053000365068;
+    x[39] =   5.954781975039809;
+    x[40] =   6.535398426382995;
+    x[41] =   7.231746029072501;
+    x[42] =  10.167574994881873;
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "HERMITE_GENZ_KEISTER_LOOKUP_POINTS - Fatal error!\n";
+    std::cerr << "  Illegal input value of N.\n";
+    std::cerr << "  N must be 1, 3, 9, 19, 35, 37, 41 or 43.\n";
+    std::exit ( 1 );
+  }
+  return;
+}
+//****************************************************************************80
+
+void hermite_genz_keister_lookup_points_np ( int n, int np, double p[],
+  double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_GENZ_KEISTER_LOOKUP_POINTS_NP looks up Genz-Keister Hermite abscissas.
+//
+//  Discussion:
+//
+//    This is a thin adapter exposing the generic parameterized "_np"
+//    interface: NP and P are accepted for uniformity with other rules
+//    but are ignored, and the work is delegated to
+//    HERMITE_GENZ_KEISTER_LOOKUP_POINTS.
+//
+//    The underlying rules belong to the nested Genz-Keister family for
+//    the Hermite integration problem, with member orders
+//    O = 1, 3, 9, 19, and a final rule of order 35, 37, 41 or 43.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    04 October 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Alan Genz, Bradley Keister,
+//    Fully symmetric interpolatory rules for multiple integrals
+//    over infinite regions with Gaussian weight,
+//    Journal of Computational and Applied Mathematics,
+//    Volume 71, 1996, pages 299-309
+//
+//    Florian Heiss, Viktor Winschel,
+//    Likelihood approximation by numerical integration on sparse grids,
+//    Journal of Econometrics,
+//    Volume 144, 2008, pages 62-80.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be 1, 3, 9, 19, 35, 37, 41 or 43.
+//
+//    Input, int NP, the number of parameters (ignored).
+//
+//    Input, double P[NP], parameters, not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::hermite_genz_keister_lookup_points ( n, x );
+}
+//****************************************************************************80
+
+void hermite_genz_keister_lookup_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_GENZ_KEISTER_LOOKUP_WEIGHTS looks up Genz-Keister Hermite weights.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( -oo <= x <= +oo ) f(x) exp ( - x * x ) dx
+//
+//    The quadrature rule:
+//
+//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
+//
+//    A nested family of rules for the Hermite integration problem
+//    was produced by Genz and Keister.  The structure of the nested
+//    family was denoted by 1+2+6+10+?, that is, it comprised rules
+//    of successive orders O = 1, 3, 9, 19, and a final rule of order
+//    35, 37, 41 or 43.
+//
+//    The precisions of these rules are P = 1, 5, 15, 29,
+//    with the final rule of precision 51, 55, 63 or 67.
+//
+//    Three related families begin the same way, but end with a different final
+//    rule.  As a convenience, this function includes these final rules as well:
+//
+//    Designation  Orders       Precisions
+//
+//    1+2+6+10+16  1,3,9,19,35  1,5,15,29,51
+//    1+2+6+10+18  1,3,9,19,37  1,5,15,29,55
+//    1+2+6+10+22  1,3,9,19,41  1,5,15,29,63
+//    1+2+6+10+24  1,3,9,19,43  1,5,15,29,67
+//
+//    Some of the data in this function was kindly supplied directly by
+//    Alan Genz on 24 April 2011.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    04 October 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Alan Genz, Bradley Keister,
+//    Fully symmetric interpolatory rules for multiple integrals
+//    over infinite regions with Gaussian weight,
+//    Journal of Computational and Applied Mathematics,
+//    Volume 71, 1996, pages 299-309
+//
+//    Florian Heiss, Viktor Winschel,
+//    Likelihood approximation by numerical integration on sparse grids,
+//    Journal of Econometrics,
+//    Volume 144, 2008, pages 62-80.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be 1, 3, 9, 19, 35, 37, 41, or 43.
+//
+//    Output, double W[N], the weights.
+//
+{
+//
+//  Each branch tabulates the N weights of one supported rule, in the order
+//  matching the abscissas from HERMITE_GENZ_KEISTER_LOOKUP_POINTS.  Note
+//  that the tabulated rules for N = 19, 41 and 43 contain negative weights.
+//
+  static double sqrtpi = 1.7724538509055159;  // NOTE(review): not referenced in this function; candidate for removal
+
+  if ( n == 1 )
+  {
+    w[ 0] =   1.7724538509055159E+00;
+  }
+  else if ( n == 3 )
+  {
+    w[ 0] =   2.9540897515091930E-01;
+    w[ 1] =   1.1816359006036772E+00;
+    w[ 2] =   2.9540897515091930E-01;
+  }
+  else if ( n == 9 )
+  {
+    w[ 0] =   1.6708826306882348E-04;
+    w[ 1] =   1.4173117873979098E-02;
+    w[ 2] =   1.6811892894767771E-01;
+    w[ 3] =   4.7869428549114124E-01;
+    w[ 4] =   4.5014700975378197E-01;
+    w[ 5] =   4.7869428549114124E-01;
+    w[ 6] =   1.6811892894767771E-01;
+    w[ 7] =   1.4173117873979098E-02;
+    w[ 8] =   1.6708826306882348E-04;
+  }
+  else if ( n == 19 )
+  {
+    w[ 0] =   1.5295717705322357E-09;
+    w[ 1] =   1.0802767206624762E-06;
+    w[ 2] =   1.0656589772852267E-04;
+    w[ 3] =   5.1133174390883855E-03;
+    w[ 4] =  -1.1232438489069229E-02;
+    w[ 5] =   3.2055243099445879E-02;
+    w[ 6] =   1.1360729895748269E-01;
+    w[ 7] =   1.0838861955003017E-01;
+    w[ 8] =   3.6924643368920851E-01;
+    w[ 9] =   5.3788160700510168E-01;
+    w[10] =   3.6924643368920851E-01;
+    w[11] =   1.0838861955003017E-01;
+    w[12] =   1.1360729895748269E-01;
+    w[13] =   3.2055243099445879E-02;
+    w[14] =  -1.1232438489069229E-02;
+    w[15] =   5.1133174390883855E-03;
+    w[16] =   1.0656589772852267E-04;
+    w[17] =   1.0802767206624762E-06;
+    w[18] =   1.5295717705322357E-09;
+  }
+  else if ( n == 35 )
+  {
+    w[ 0] =   1.8684014894510604E-18;
+    w[ 1] =   9.6599466278563243E-15;
+    w[ 2] =   5.4896836948499462E-12;
+    w[ 3] =   8.1553721816916897E-10;
+    w[ 4] =   3.7920222392319532E-08;
+    w[ 5] =   4.3737818040926989E-07;
+    w[ 6] =   4.8462799737020461E-06;
+    w[ 7] =   6.3328620805617891E-05;
+    w[ 8] =   4.8785399304443770E-04;
+    w[ 9] =   1.4515580425155904E-03;
+    w[10] =   4.0967527720344047E-03;
+    w[11] =   5.5928828911469180E-03;
+    w[12] =   2.7780508908535097E-02;
+    w[13] =   8.0245518147390893E-02;
+    w[14] =   1.6371221555735804E-01;
+    w[15] =   2.6244871488784277E-01;
+    w[16] =   3.3988595585585218E-01;
+    w[17] =   9.1262675363737921E-04;
+    w[18] =   3.3988595585585218E-01;
+    w[19] =   2.6244871488784277E-01;
+    w[20] =   1.6371221555735804E-01;
+    w[21] =   8.0245518147390893E-02;
+    w[22] =   2.7780508908535097E-02;
+    w[23] =   5.5928828911469180E-03;
+    w[24] =   4.0967527720344047E-03;
+    w[25] =   1.4515580425155904E-03;
+    w[26] =   4.8785399304443770E-04;
+    w[27] =   6.3328620805617891E-05;
+    w[28] =   4.8462799737020461E-06;
+    w[29] =   4.3737818040926989E-07;
+    w[30] =   3.7920222392319532E-08;
+    w[31] =   8.1553721816916897E-10;
+    w[32] =   5.4896836948499462E-12;
+    w[33] =   9.6599466278563243E-15;
+    w[34] =   1.8684014894510604E-18;
+  }
+  else if ( n == 37 )
+  {
+    w[ 0] = 0.337304188079177058E-20;
+    w[ 1] = 0.332834739632930463E-16;
+    w[ 2] = 0.323016866782871498E-13;
+    w[ 3] = 0.809333688669950037E-11;
+    w[ 4] = 0.748907559239519284E-09;
+    w[ 5] = 0.294146671497083432E-07;
+    w[ 6] = 0.524482423744884136E-06;
+    w[ 7] = 0.586639457073896277E-05;
+    w[ 8] = 0.571885531470621903E-04;
+    w[ 9] = 0.41642095727577091E-03;
+    w[10] = 0.174733389581099482E-02;
+    w[11] = 0.313373786000304381E-02;
+    w[12] = 0.768092665770660459E-02;
+    w[13] = 0.274962713372148476E-01;
+    w[14] = 0.783630990508037449E-01;
+    w[15] = 0.16611584261479281E+00;
+    w[16] = 0.253636910481387185E+00;
+    w[17] = 0.261712932511430884E+00;
+    w[18] = 0.171719680968980257E+00;
+    w[19] = 0.261712932511430884E+00;
+    w[20] = 0.253636910481387185E+00;
+    w[21] = 0.16611584261479281E+00;
+    w[22] = 0.783630990508037449E-01;
+    w[23] = 0.274962713372148476E-01;
+    w[24] = 0.768092665770660459E-02;
+    w[25] = 0.313373786000304381E-02;
+    w[26] = 0.174733389581099482E-02;
+    w[27] = 0.41642095727577091E-03;
+    w[28] = 0.571885531470621903E-04;
+    w[29] = 0.586639457073896277E-05;
+    w[30] = 0.524482423744884136E-06;
+    w[31] = 0.294146671497083432E-07;
+    w[32] = 0.748907559239519284E-09;
+    w[33] = 0.809333688669950037E-11;
+    w[34] = 0.323016866782871498E-13;
+    w[35] = 0.332834739632930463E-16;
+    w[36] = 0.337304188079177058E-20;
+  }
+  else if ( n == 41 )
+  {
+    w[ 0] =   0.117725656974405367E-22;
+    w[ 1] =   0.152506745534300636E-18;
+    w[ 2] =   0.202183949965101288E-15;
+    w[ 3] =   0.724614869051195508E-13;
+    w[ 4] =   0.103121966469463034E-10;
+    w[ 5] =   0.710371395169350952E-09;
+    w[ 6] =   0.264376044449260516E-07;
+    w[ 7] =   0.558982787078644997E-06;
+    w[ 8] =   0.675628907134744976E-05;
+    w[ 9] =   0.512198007019776873E-04;
+    w[10] =   0.335013114947200879E-03;
+    w[11] =   0.249379691096933139E-02;
+    w[12] = - 0.25616995850607458E-01;
+    w[13] =   0.317007878644325588E-01;
+    w[14] =   0.125041498584003435E-02;
+    w[15] =   0.293244560924894295E-01;
+    w[16] =   0.799536390803302298E-01;
+    w[17] =   0.164543666806555251E+00;
+    w[18] =   0.258718519718241095E+00;
+    w[19] =   0.293588795735908566E+00;
+    w[20] =   0.997525375254611951E-01;
+    w[21] =   0.293588795735908566E+00;
+    w[22] =   0.258718519718241095E+00;
+    w[23] =   0.164543666806555251E+00;
+    w[24] =   0.799536390803302298E-01;
+    w[25] =   0.293244560924894295E-01;
+    w[26] =   0.125041498584003435E-02;
+    w[27] =   0.317007878644325588E-01;
+    w[28] = - 0.25616995850607458E-01;
+    w[29] =   0.249379691096933139E-02;
+    w[30] =   0.335013114947200879E-03;
+    w[31] =   0.512198007019776873E-04;
+    w[32] =   0.675628907134744976E-05;
+    w[33] =   0.558982787078644997E-06;
+    w[34] =   0.264376044449260516E-07;
+    w[35] =   0.710371395169350952E-09;
+    w[36] =   0.103121966469463034E-10;
+    w[37] =   0.724614869051195508E-13;
+    w[38] =   0.202183949965101288E-15;
+    w[39] =   0.152506745534300636E-18;
+    w[40] =   0.117725656974405367E-22;
+  }
+  else if ( n == 43 )
+  {
+    w[ 0] =   0.968100020641528185E-37;
+    w[ 1] =   0.15516931262860431E-22;
+    w[ 2] =   0.175937309107750992E-18;
+    w[ 3] =   0.217337608710893738E-15;
+    w[ 4] =   0.747837010380540069E-13;
+    w[ 5] =   0.104028132097205732E-10;
+    w[ 6] =   0.70903573389336778E-09;
+    w[ 7] =   0.263481722999966618E-07;
+    w[ 8] =   0.560127964848432175E-06;
+    w[ 9] =   0.680410934802210232E-05;
+    w[10] =   0.508343873102544037E-04;
+    w[11] =   0.32753080006610181E-03;
+    w[12] =   0.267479828788552937E-02;
+    w[13] = - 0.687704270963253854E-02;
+    w[14] =   0.119383201790913588E-01;
+    w[15] =   0.248083722871002796E-02;
+    w[16] =   0.29000335749726387E-01;
+    w[17] =   0.798689557875757008E-01;
+    w[18] =   0.164609842422580606E+00;
+    w[19] =   0.258535954731607738E+00;
+    w[20] =   0.292243810406117141E+00;
+    w[21] =   0.102730713753441829E+00;
+    w[22] =   0.292243810406117141E+00;
+    w[23] =   0.258535954731607738E+00;
+    w[24] =   0.164609842422580606E+00;
+    w[25] =   0.798689557875757008E-01;
+    w[26] =   0.29000335749726387E-01;
+    w[27] =   0.248083722871002796E-02;
+    w[28] =   0.119383201790913588E-01;
+    w[29] = - 0.687704270963253854E-02;
+    w[30] =   0.267479828788552937E-02;
+    w[31] =   0.32753080006610181E-03;
+    w[32] =   0.508343873102544037E-04;
+    w[33] =   0.680410934802210232E-05;
+    w[34] =   0.560127964848432175E-06;
+    w[35] =   0.263481722999966618E-07;
+    w[36] =   0.70903573389336778E-09;
+    w[37] =   0.104028132097205732E-10;
+    w[38] =   0.747837010380540069E-13;
+    w[39] =   0.217337608710893738E-15;
+    w[40] =   0.175937309107750992E-18;
+    w[41] =   0.15516931262860431E-22;
+    w[42] =   0.968100020641528185E-37;
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "HERMITE_GENZ_KEISTER_LOOKUP_WEIGHTS - Fatal error!\n";
+    std::cerr << "  Illegal input value of N.\n";
+    std::cerr << "  N must be 1, 3, 9, 19, 35, 37, 41 or 43.\n";
+    std::exit ( 1 );
+  }
+  return;
+}
+//****************************************************************************80
+
+void hermite_genz_keister_lookup_weights_np ( int n, int np, double p[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_GENZ_KEISTER_LOOKUP_WEIGHTS_NP looks up Genz-Keister Hermite weights.
+//
+//  Discussion:
+//
+//    This is a thin adapter exposing the generic parameterized "_np"
+//    interface: NP and P are accepted for uniformity with other rules
+//    but are ignored, and the work is delegated to
+//    HERMITE_GENZ_KEISTER_LOOKUP_WEIGHTS.
+//
+//    The underlying rules belong to the nested Genz-Keister family for
+//    the Hermite integration problem, with member orders
+//    O = 1, 3, 9, 19, and a final rule of order 35, 37, 41 or 43.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    04 October 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Alan Genz, Bradley Keister,
+//    Fully symmetric interpolatory rules for multiple integrals
+//    over infinite regions with Gaussian weight,
+//    Journal of Computational and Applied Mathematics,
+//    Volume 71, 1996, pages 299-309
+//
+//    Florian Heiss, Viktor Winschel,
+//    Likelihood approximation by numerical integration on sparse grids,
+//    Journal of Econometrics,
+//    Volume 144, 2008, pages 62-80.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be 1, 3, 9, 19, 35, 37, 41 or 43.
+//
+//    Input, int NP, the number of parameters (ignored).
+//
+//    Input, double P[NP], parameters, not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::hermite_genz_keister_lookup_weights ( n, w );
+}
+//****************************************************************************80
+
+void hermite_gk18_lookup_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_GK18_LOOKUP_POINTS: abscissas of a Hermite Genz-Keister 18 rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( -oo <= x <= +oo ) f(x) exp ( - x * x ) dx
+//
+//    The quadrature rule:
+//
+//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
+//
+//    A nested family of rules for the Hermite integration problem
+//    was produced by Genz and Keister.  The structure of the nested
+//    family was denoted by 1+2+6+10+18, that is, it comprised rules
+//    of successive orders O = 1, 3, 9, 19, and 37.
+//
+//    The precisions of these rules are P = 1, 5, 15, 29, and 55.
+//
+//    Some of the data in this function was kindly supplied directly by
+//    Alan Genz on 24 April 2011.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    30 April 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Alan Genz, Bradley Keister,
+//    Fully symmetric interpolatory rules for multiple integrals
+//    over infinite regions with Gaussian weight,
+//    Journal of Computational and Applied Mathematics,
+//    Volume 71, 1996, pages 299-309
+//
+//    Florian Heiss, Viktor Winschel,
+//    Likelihood approximation by numerical integration on sparse grids,
+//    Journal of Econometrics,
+//    Volume 144, 2008, pages 62-80.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be 1, 3, 9, 19, or 37.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  if ( n == 1 )
+  {
+    x[ 0] =   0.0000000000000000E+00;
+  }
+  else if ( n == 3 )
+  {
+    x[ 0] =  -1.2247448713915889E+00;
+    x[ 1] =   0.0000000000000000E+00;
+    x[ 2] =   1.2247448713915889E+00;
+  }
+  else if ( n == 9 )
+  {
+    x[ 0] =  -2.9592107790638380E+00;
+    x[ 1] =  -2.0232301911005157E+00;
+    x[ 2] =  -1.2247448713915889E+00;
+    x[ 3] =  -5.2403354748695763E-01;
+    x[ 4] =   0.0000000000000000E+00;
+    x[ 5] =   5.2403354748695763E-01;
+    x[ 6] =   1.2247448713915889E+00;
+    x[ 7] =   2.0232301911005157E+00;
+    x[ 8] =   2.9592107790638380E+00;
+  }
+  else if ( n == 19 )
+  {
+    x[ 0] =  -4.4995993983103881E+00;
+    x[ 1] =  -3.6677742159463378E+00;
+    x[ 2] =  -2.9592107790638380E+00;
+    x[ 3] =  -2.2665132620567876E+00;
+    x[ 4] =  -2.0232301911005157E+00;
+    x[ 5] =  -1.8357079751751868E+00;
+    x[ 6] =  -1.2247448713915889E+00;
+    x[ 7] =  -8.7004089535290285E-01;
+    x[ 8] =  -5.2403354748695763E-01;
+    x[ 9] =   0.0000000000000000E+00;
+    x[10] =   5.2403354748695763E-01;
+    x[11] =   8.7004089535290285E-01;
+    x[12] =   1.2247448713915889E+00;
+    x[13] =   1.8357079751751868E+00;
+    x[14] =   2.0232301911005157E+00;
+    x[15] =   2.2665132620567876E+00;
+    x[16] =   2.9592107790638380E+00;
+    x[17] =   3.6677742159463378E+00;
+    x[18] =   4.4995993983103881E+00;
+  }
+//
+//  BUGFIX: this final rule has 37 abscissas (indices 0..36), matching the
+//  documented orders "1, 3, 9, 19, and 37".  The branch previously tested
+//  n == 35, which both overran X[] for N = 35 and rejected the valid N = 37.
+//
+  else if ( n == 37 )
+  {
+    x[ 0] =  -6.853200069757519;
+    x[ 1] =  -6.124527854622158;
+    x[ 2] =  -5.521865209868350;
+    x[ 3] =  -4.986551454150765;
+    x[ 4] =  -4.499599398310388;
+    x[ 5] =  -4.057956316089741;
+    x[ 6] =  -3.667774215946338;
+    x[ 7] =  -3.315584617593290;
+    x[ 8] =  -2.959210779063838;
+    x[ 9] =  -2.597288631188366;
+    x[10] =  -2.266513262056788;
+    x[11] =  -2.023230191100516;
+    x[12] =  -1.835707975175187;
+    x[13] =  -1.561553427651873;
+    x[14] =  -1.224744871391589;
+    x[15] =  -0.870040895352903;
+    x[16] =  -0.524033547486958;
+    x[17] =  -0.214618180588171;
+    x[18] =   0.000000000000000;
+    x[19] =   0.214618180588171;
+    x[20] =   0.524033547486958;
+    x[21] =   0.870040895352903;
+    x[22] =   1.224744871391589;
+    x[23] =   1.561553427651873;
+    x[24] =   1.835707975175187;
+    x[25] =   2.023230191100516;
+    x[26] =   2.266513262056788;
+    x[27] =   2.597288631188366;
+    x[28] =   2.959210779063838;
+    x[29] =   3.315584617593290;
+    x[30] =   3.667774215946338;
+    x[31] =   4.057956316089741;
+    x[32] =   4.499599398310388;
+    x[33] =   4.986551454150765;
+    x[34] =   5.521865209868350;
+    x[35] =   6.124527854622158;
+    x[36] =   6.853200069757519;
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "HERMITE_GK18_LOOKUP_POINTS - Fatal error!\n";
+    std::cerr << "  Illegal input value of N.\n";
+    std::cerr << "  N must be 1, 3, 9, 19, or 37.\n";
+    std::exit ( 1 );
+  }
+  return;
+}
+//****************************************************************************80
+
void hermite_gk22_lookup_points ( int n, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    HERMITE_GK22_LOOKUP_POINTS looks up Hermite Genz-Keister 22 points.
//
//  Discussion:
//
//    The rule approximates
//
//      integral ( -oo <= x <= +oo ) f(x) exp ( - x * x ) dx
//
//    by
//
//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
//
//    The Genz-Keister 22 family is nested, with member orders
//    O = 1, 3, 9, 19 and 41 and corresponding precisions
//    P = 1, 5, 15, 29 and 63.  Every member point set is symmetric about
//    the origin, so only the strictly positive abscissas are tabulated
//    below; the negative ones are produced by reflection.
//
//    Some of the data in this function was kindly supplied directly by
//    Alan Genz on 24 April 2011.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    26 April 2011
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Alan Genz, Bradley Keister,
//    Fully symmetric interpolatory rules for multiple integrals
//    over infinite regions with Gaussian weight,
//    Journal of Computational and Applied Mathematics,
//    Volume 71, 1996, pages 299-309
//
//    Thomas Patterson,
//    The Optimal Addition of Points to Quadrature Formulae,
//    Mathematics of Computation,
//    Volume 22, Number 104, October 1968, pages 847-856.
//
//  Parameters:
//
//    Input, int N, the order.
//    N must be 1, 3, 9, 19, or 41.
//
//    Output, double X[N], the abscissas.
//
{
//
//  Strictly positive abscissas of each member rule, in increasing order.
//
  static const double pos3[1] = {
    1.2247448713915889E+00 };
  static const double pos9[4] = {
    5.2403354748695763E-01, 1.2247448713915889E+00,
    2.0232301911005157E+00, 2.9592107790638380E+00 };
  static const double pos19[9] = {
    5.2403354748695763E-01, 8.7004089535290285E-01,
    1.2247448713915889E+00, 1.8357079751751868E+00,
    2.0232301911005157E+00, 2.2665132620567876E+00,
    2.9592107790638380E+00, 3.6677742159463378E+00,
    4.4995993983103881E+00 };
  static const double pos41[20] = {
    0.195324784415805,   0.52403354748695763,
    0.87004089535290285, 1.2247448713915889,
    1.585873011819188,   1.8357079751751868,
    2.0232301911005157,  2.043834754429505,
    2.2665132620567876,  2.630415236459871,
    2.9592107790638380,  3.296114596212218,
    3.6677742159463378,  4.070919267883068,
    4.4995993983103881,  4.953574342912980,
    5.437443360177798,   5.961461043404500,
    6.547083258397540,   7.251792998192644 };

  const double *pos;
  int mid;
  int k;

  switch ( n )
  {
    case 1:
      pos = 0;
      break;
    case 3:
      pos = pos3;
      break;
    case 9:
      pos = pos9;
      break;
    case 19:
      pos = pos19;
      break;
    case 41:
      pos = pos41;
      break;
    default:
      std::cerr << "\n";
      std::cerr << "HERMITE_GK22_LOOKUP_POINTS - Fatal error!\n";
      std::cerr << "  Illegal input value of N.\n";
      std::cerr << "  N must be 1, 3, 9, 19, or 41.\n";
      std::exit ( 1 );
  }
//
//  Symmetric fill: x[mid] = 0 and x[mid +/- k] = +/- pos[k-1].
//
  mid = ( n - 1 ) / 2;
  x[mid] = 0.0;
  for ( k = 1; k <= mid; k++ )
  {
    x[mid+k] =  pos[k-1];
    x[mid-k] = -pos[k-1];
  }
  return;
}
+//****************************************************************************80
+
void hermite_gk24_lookup_points ( int n, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    HERMITE_GK24_LOOKUP_POINTS looks up Hermite Genz-Keister 24 points.
//
//  Discussion:
//
//    The integral:
//
//      integral ( -oo <= x <= +oo ) f(x) exp ( - x * x ) dx
//
//    The quadrature rule:
//
//      sum ( 1 <= i <= n ) w(i) * f ( x(i) )
//
//    A nested family of rules for the Hermite integration problem
//    was produced by Genz and Keister.  The structure of the nested
//    family was denoted by 1+2+6+10+24, that is, it comprised rules
//    of successive orders O = 1, 3, 9, 19, and 43.
//
//    The precisions of these rules are P = 1, 5, 15, 29, and 67.
//
//    The point sets below are symmetric about the origin and nested:
//    every abscissa of the order 1, 3, 9 and 19 rules reappears in the
//    next larger rule.
//
//    Some of the data in this function was kindly supplied directly by
//    Alan Genz on 24 April 2011.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    26 April 2011
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Alan Genz, Bradley Keister,
//    Fully symmetric interpolatory rules for multiple integrals
//    over infinite regions with Gaussian weight,
//    Journal of Computational and Applied Mathematics,
//    Volume 71, 1996, pages 299-309
//
//    Thomas Patterson,
//    The Optimal Addition of Points to Quadrature Formulae,
//    Mathematics of Computation,
//    Volume 22, Number 104, October 1968, pages 847-856.
//
//  Parameters:
//
//    Input, int N, the order.
//    N must be 1, 3, 9, 19, or 43.
//
//    Output, double X[N], the abscissas.
//
{
  if ( n == 1 )
  {
    x[ 0] =   0.0000000000000000E+00;
  }
  else if ( n == 3 )
  {
    x[ 0] =  -1.2247448713915889E+00;
    x[ 1] =   0.0000000000000000E+00;
    x[ 2] =   1.2247448713915889E+00;
  }
  else if ( n == 9 )
  {
    x[ 0] =  -2.9592107790638380E+00;
    x[ 1] =  -2.0232301911005157E+00;
    x[ 2] =  -1.2247448713915889E+00;
    x[ 3] =  -5.2403354748695763E-01;
    x[ 4] =   0.0000000000000000E+00;
    x[ 5] =   5.2403354748695763E-01;
    x[ 6] =   1.2247448713915889E+00;
    x[ 7] =   2.0232301911005157E+00;
    x[ 8] =   2.9592107790638380E+00;
  }
  else if ( n == 19 )
  {
    x[ 0] =  -4.4995993983103881E+00;
    x[ 1] =  -3.6677742159463378E+00;
    x[ 2] =  -2.9592107790638380E+00;
    x[ 3] =  -2.2665132620567876E+00;
    x[ 4] =  -2.0232301911005157E+00;
    x[ 5] =  -1.8357079751751868E+00;
    x[ 6] =  -1.2247448713915889E+00;
    x[ 7] =  -8.7004089535290285E-01;
    x[ 8] =  -5.2403354748695763E-01;
    x[ 9] =   0.0000000000000000E+00;
    x[10] =   5.2403354748695763E-01;
    x[11] =   8.7004089535290285E-01;
    x[12] =   1.2247448713915889E+00;
    x[13] =   1.8357079751751868E+00;
    x[14] =   2.0232301911005157E+00;
    x[15] =   2.2665132620567876E+00;
    x[16] =   2.9592107790638380E+00;
    x[17] =   3.6677742159463378E+00;
    x[18] =   4.4995993983103881E+00;
  }
//
//  The order 43 rule adds 24 points to the order 19 rule.
//
  else if ( n == 43 )
  {
    x[ 0] = -10.167574994881873;
    x[ 1] =  -7.231746029072501;
    x[ 2] =  -6.535398426382995;
    x[ 3] =  -5.954781975039809;
    x[ 4] =  -5.434053000365068;
    x[ 5] =  -4.952329763008589;
    x[ 6] =  -4.4995993983103881;
    x[ 7] =  -4.071335874253583;
    x[ 8] =  -3.6677742159463378;
    x[ 9] =  -3.295265921534226;
    x[10] =  -2.9592107790638380;
    x[11] =  -2.633356763661946;
    x[12] =  -2.2665132620567876;
    x[13] =  -2.089340389294661;
    x[14] =  -2.0232301911005157;
    x[15] =  -1.8357079751751868;
    x[16] =  -1.583643465293944;
    x[17] =  -1.2247448713915889;
    x[18] =  -0.87004089535290285;
    x[19] =  -0.52403354748695763;
    x[20] =  -0.196029453662011;
    x[21] =   0.0000000000000000;
    x[22] =   0.196029453662011;
    x[23] =   0.52403354748695763;
    x[24] =   0.87004089535290285;
    x[25] =   1.2247448713915889;
    x[26] =   1.583643465293944;
    x[27] =   1.8357079751751868;
    x[28] =   2.0232301911005157;
    x[29] =   2.089340389294661;
    x[30] =   2.2665132620567876;
    x[31] =   2.633356763661946;
    x[32] =   2.9592107790638380;
    x[33] =   3.295265921534226;
    x[34] =   3.6677742159463378;
    x[35] =   4.071335874253583;
    x[36] =   4.4995993983103881;
    x[37] =   4.952329763008589;
    x[38] =   5.434053000365068;
    x[39] =   5.954781975039809;
    x[40] =   6.535398426382995;
    x[41] =   7.231746029072501;
    x[42] =  10.167574994881873;
  }
  else
  {
    std::cerr << "\n";
    std::cerr << "HERMITE_GK24_LOOKUP_POINTS - Fatal error!\n";
    std::cerr << "  Illegal input value of N.\n";
    std::cerr << "  N must be 1, 3, 9, 19, or 43.\n";
    std::exit ( 1 );
  }
  return;
}
+//****************************************************************************80
+
+double hermite_integral ( int n )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_INTEGRAL evaluates a monomial Hermite integral.
+//
+//  Discussion:
+//
+//    H(n) = Integral ( -oo < x < +oo ) x^n exp(-x^2) dx
+//
+//    H(n) is 0 for n odd.
+//
+//    H(n) = (n-1)!! * sqrt(pi) / 2^(n/2) for n even.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 February 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order of the integral.
+//    0 <= N.
+//
+//    Output, double VALUE, the value of the integral.
+//
+{
+  double pi = 3.141592653589793;
+  double value;
+
+  if ( n < 0 )
+  {
+    value = - webbur::r8_huge ( );
+  }
+  else if ( ( n % 2 ) == 1 )
+  {
+    value = 0.0;
+  }
+  else
+  {
+    value = webbur::r8_factorial2 ( n - 1 ) * std::sqrt ( pi )
+      / std::pow ( 2.0, n / 2 );
+  }
+
+  return value;
+}
+//****************************************************************************80
+
void hermite_interpolant ( int n, double x[], double y[], double yp[],
  double xd[], double yd[], double xdp[], double ydp[] )

//****************************************************************************80
//
//  Purpose:
//
//    HERMITE_INTERPOLANT sets up a divided difference table from Hermite data.
//
//  Discussion:
//
//    The polynomial represented by the divided difference table can be
//    evaluated by calling DIF_VALS.
//
//    Because each abscissa carries both a function value and a derivative
//    value, every abscissa is entered twice into the table, and the first
//    column of divided differences alternates between derivative values
//    (for a repeated abscissa) and ordinary difference quotients.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    31 October 2011
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Carl deBoor,
//    A Practical Guide to Splines,
//    Springer, 2001,
//    ISBN: 0387953663,
//    LC: QA1.A647.v27.
//
//  Parameters:
//
//    Input, int N, the number of items of data
//    ( X(I), Y(I), YP(I) ).
//
//    Input, double X[N], the abscissas.
//    These values must be distinct.
//
//    Input, double Y[N], YP[N], the function and derivative values.
//
//    Output, double XD[2*N], YD[2*N], the divided difference table
//    for the interpolant value.
//
//    Output, double XDP[2*N-1], YDP[2*N-1], the divided difference
//    table for the interpolant derivative.
//
{
  int i;
  int j;
  int nd;   // size of the value table: 2 * N
  int ndp;  // size of the derivative table, returned by DIF_DERIV
//
//  Copy the data, duplicating each abscissa: XD = ( X0, X0, X1, X1, ... ).
//
  nd = 2 * n;

  for ( i = 0; i < n; i++ )
  {
    xd[0+i*2] = x[i];
    xd[1+i*2] = x[i];
  }
//
//  Carry out the first step of differencing.  After these two loops,
//  YD = ( Y0, YP0, D0, YP1, D1, ... ), where DI = (Y(I+1)-Y(I))/(X(I+1)-X(I))
//  is the ordinary difference quotient and YP(I) stands in as the limiting
//  divided difference for the repeated abscissa X(I).
//
  yd[0] = y[0];
  for ( i = 1; i < n; i++ )
  {
    yd[0+2*i] = ( y[i] - y[i-1] ) / ( x[i] - x[i-1] );
  }
  for ( i = 0; i < n; i++ )
  {
    yd[1+2*i] = yp[i];
  }
//
//  Carry out the remaining steps in the usual way.  J runs downward so that
//  each update reads only entries of the previous differencing level.
//
  for ( i = 2; i < nd; i++ )
  {
    for ( j = nd - 1; i <= j; j-- )
    {
      yd[j] = ( yd[j] - yd[j-1] ) / ( xd[j] - xd[j-i] );
    }
  }
//
//  Compute the difference table for the derivative.
//
  webbur::dif_deriv ( nd, xd, yd, &ndp, xdp, ydp );

  return;
}
+//****************************************************************************80
+
+void hermite_interpolant_rule ( int n, double a, double b, double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_INTERPOLANT_RULE: quadrature rule for a Hermite interpolant.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 October 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of abscissas.
+//
+//    Input, double A, B, the integration limits.
+//
+//    Input, double X[N], the abscissas.
+//
+//    Output, double W[2*N], the quadrature
+//    coefficients, given as pairs for function and derivative values
+//    at each abscissa.
+//
+{
+  double a_value;
+  double b_value;
+  double *c;
+  int i;
+  int j;
+  int k;
+  int nd;
+  int ndp;
+  double *xd;
+  double *xdp;
+  double *y;
+  double *yd;
+  double *ydp;
+  double *yp;
+
+  y = new double[n];
+  yp = new double[n];
+
+  nd = 2 * n;
+  c = new double[nd];
+  xd = new double[nd];
+  yd = new double[nd];
+
+  ndp = 2 * n - 1;
+  xdp = new double[ndp];
+  ydp = new double[ndp];
+
+  for ( i = 0; i < n; i++ )
+  {
+    y[i] = 0.0;
+    yp[i] = 0.0;
+  }
+
+  k = 0;
+
+  for ( i = 0; i < n; i++ )
+  {
+    y[i] = 1.0;
+    webbur::hermite_interpolant ( n, x, y, yp, xd, yd, xdp, ydp );
+    webbur::dif_to_r8poly ( nd, xd, yd, c );
+    a_value = webbur::r8poly_ant_val ( n, c, a );
+    b_value = webbur::r8poly_ant_val ( n, c, b );
+    w[k] = b_value - a_value;
+    y[i] = 0.0;
+    k = k + 1;
+
+    yp[i] = 1.0;
+    webbur::hermite_interpolant ( n, x, y, yp, xd, yd, xdp, ydp );
+    webbur::dif_to_r8poly ( nd, xd, yd, c );
+    a_value = webbur::r8poly_ant_val ( n, c, a );
+    b_value = webbur::r8poly_ant_val ( n, c, b );
+    w[k] = b_value - a_value;
+    yp[i] = 0.0;
+    k = k + 1;
+  }
+
+  delete [] c;
+  delete [] xd;
+  delete [] xdp;
+  delete [] y;
+  delete [] yd;
+  delete [] ydp;
+  delete [] yp;
+
+  return;
+}
+//****************************************************************************80
+
void hermite_interpolant_value ( int nd, double xd[], double yd[], double xdp[],
  double ydp[], int nv, double xv[], double yv[], double yvp[] )

//****************************************************************************80
//
//  Purpose:
//
//    HERMITE_INTERPOLANT_VALUE evaluates the Hermite interpolant polynomial.
//
//  Discussion:
//
//    More generally, this function evaluates any polynomial represented
//    by a divided difference table (Newton form), together with the
//    polynomial represented by the companion derivative table.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    31 October 2011
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Carl deBoor,
//    A Practical Guide to Splines,
//    Springer, 2001,
//    ISBN: 0387953663,
//    LC: QA1.A647.v27.
//
//  Parameters:
//
//    Input, int ND, the order of the difference table.
//
//    Input, double XD[ND], YD[ND], the difference table for the
//    interpolant value.
//
//    Input, double XDP[ND-1], YDP[ND-1], the difference table for
//    the interpolant derivative.
//
//    Input, int NV, the number of evaluation points.
//
//    Input, double XV[NV], the evaluation points.
//
//    Output, double YV[NV], YVP[NV], the value of the interpolant and
//    its derivative at the evaluation points.
//
{
  int k;
  int p;
  double t;
  double value;

  for ( p = 0; p < nv; p++ )
  {
    t = xv[p];
//
//  Horner-style evaluation of the Newton form held in XD/YD.
//
    value = yd[nd-1];
    for ( k = nd - 2; 0 <= k; k-- )
    {
      value = yd[k] + ( t - xd[k] ) * value;
    }
    yv[p] = value;
//
//  Same scheme for the derivative table, which has ND - 1 entries.
//
    value = ydp[nd-2];
    for ( k = nd - 3; 0 <= k; k-- )
    {
      value = ydp[k] + ( t - xdp[k] ) * value;
    }
    yvp[p] = value;
  }
  return;
}
+//****************************************************************************80
+
+void hermite_lookup ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_LOOKUP looks up abscissas and weights for Gauss-Hermite quadrature.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    27 April 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Milton Abramowitz, Irene Stegun,
+//    Handbook of Mathematical Functions,
+//    National Bureau of Standards, 1964,
+//    ISBN: 0-486-61272-4,
+//    LC: QA47.A34.
+//
+//    Vladimir Krylov,
+//    Approximate Calculation of Integrals,
+//    Dover, 2006,
+//    ISBN: 0486445798.
+//    LC: QA311.K713.
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//    Stephen Wolfram,
+//    The Mathematica Book,
+//    Fourth Edition,
+//    Cambridge University Press, 1999,
+//    ISBN: 0-521-64314-7,
+//    LC: QA76.95.W65.
+//
+//    Daniel Zwillinger, editor,
+//    CRC Standard Mathematical Tables and Formulae,
+//    30th Edition,
+//    CRC Press, 1996,
+//    ISBN: 0-8493-2479-3,
+//    LC: QA47.M315.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be between 1 and 20.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::hermite_lookup_points ( n, x );
+
+  webbur::hermite_lookup_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
+void hermite_lookup_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_LOOKUP_POINTS looks up abscissas for Hermite quadrature.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( -oo < x < +oo ) exp ( - x * x ) * f(x) dx
+//
+//    The quadrature rule:
+//
+//      sum ( 1 <= i <= n ) w(i) * f ( x(i) ).
+//
+//    Mathematica can numerically estimate the abscissas
+//    of order N to P digits by the command:
+//
+//      NSolve [ HermiteH [ n, x ] == 0, x, p ]
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    27 April 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Milton Abramowitz, Irene Stegun,
+//    Handbook of Mathematical Functions,
+//    National Bureau of Standards, 1964,
+//    ISBN: 0-486-61272-4,
+//    LC: QA47.A34.
+//
+//    Vladimir Krylov,
+//    Approximate Calculation of Integrals,
+//    Dover, 2006,
+//    ISBN: 0486445798,
+//    LC: QA311.K713.
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//    Stephen Wolfram,
+//    The Mathematica Book,
+//    Fourth Edition,
+//    Cambridge University Press, 1999,
+//    ISBN: 0-521-64314-7,
+//    LC: QA76.95.W65.
+//
+//    Daniel Zwillinger, editor,
+//    CRC Standard Mathematical Tables and Formulae,
+//    30th Edition,
+//    CRC Press, 1996,
+//    ISBN: 0-8493-2479-3,
+//    LC: QA47.M315.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be between 1 and 20.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  if ( n == 1 )
+  {
+    x[0] = 0.0;
+  }
+  else if ( n == 2 )
+  {
+    x[0] = - 0.707106781186547524400844362105E+00;
+    x[1] =   0.707106781186547524400844362105E+00;
+  }
+  else if ( n == 3 )
+  {
+    x[0] = - 0.122474487139158904909864203735E+01;
+    x[1] =   0.0E+00;
+    x[2] =   0.122474487139158904909864203735E+01;
+  }
+  else if ( n == 4 )
+  {
+    x[0] = - 0.165068012388578455588334111112E+01;
+    x[1] = - 0.524647623275290317884060253835E+00;
+    x[2] =   0.524647623275290317884060253835E+00;
+    x[3] =   0.165068012388578455588334111112E+01;
+  }
+  else if ( n == 5 )
+  {
+    x[0] = - 0.202018287045608563292872408814E+01;
+    x[1] = - 0.958572464613818507112770593893E+00;
+    x[2] =   0.0E+00;
+    x[3] =   0.958572464613818507112770593893E+00;
+    x[4] =   0.202018287045608563292872408814E+01;
+  }
+  else if ( n == 6 )
+  {
+    x[0] = - 0.235060497367449222283392198706E+01;
+    x[1] = - 0.133584907401369694971489528297E+01;
+    x[2] = - 0.436077411927616508679215948251E+00;
+    x[3] =   0.436077411927616508679215948251E+00;
+    x[4] =   0.133584907401369694971489528297E+01;
+    x[5] =   0.235060497367449222283392198706E+01;
+  }
+  else if ( n == 7 )
+  {
+    x[0] = - 0.265196135683523349244708200652E+01;
+    x[1] = - 0.167355162876747144503180139830E+01;
+    x[2] = - 0.816287882858964663038710959027E+00;
+    x[3] =   0.0E+00;
+    x[4] =   0.816287882858964663038710959027E+00;
+    x[5] =   0.167355162876747144503180139830E+01;
+    x[6] =   0.265196135683523349244708200652E+01;
+  }
+  else if ( n == 8 )
+  {
+    x[0] = - 0.293063742025724401922350270524E+01;
+    x[1] = - 0.198165675669584292585463063977E+01;
+    x[2] = - 0.115719371244678019472076577906E+01;
+    x[3] = - 0.381186990207322116854718885584E+00;
+    x[4] =   0.381186990207322116854718885584E+00;
+    x[5] =   0.115719371244678019472076577906E+01;
+    x[6] =   0.198165675669584292585463063977E+01;
+    x[7] =   0.293063742025724401922350270524E+01;
+  }
+  else if ( n == 9 )
+  {
+    x[0] = - 0.319099320178152760723004779538E+01;
+    x[1] = - 0.226658058453184311180209693284E+01;
+    x[2] = - 0.146855328921666793166701573925E+01;
+    x[3] = - 0.723551018752837573322639864579E+00;
+    x[4] =   0.0E+00;
+    x[5] =   0.723551018752837573322639864579E+00;
+    x[6] =   0.146855328921666793166701573925E+01;
+    x[7] =   0.226658058453184311180209693284E+01;
+    x[8] =   0.319099320178152760723004779538E+01;
+  }
+  else if ( n == 10 )
+  {
+    x[0] =  - 0.343615911883773760332672549432E+01;
+    x[1] =  - 0.253273167423278979640896079775E+01;
+    x[2] =  - 0.175668364929988177345140122011E+01;
+    x[3] =  - 0.103661082978951365417749191676E+01;
+    x[4] =  - 0.342901327223704608789165025557E+00;
+    x[5] =    0.342901327223704608789165025557E+00;
+    x[6] =    0.103661082978951365417749191676E+01;
+    x[7] =    0.175668364929988177345140122011E+01;
+    x[8] =    0.253273167423278979640896079775E+01;
+    x[9] =    0.343615911883773760332672549432E+01;
+  }
+  else if ( n == 11 )
+  {
+    x[0] =  - 0.366847084655958251845837146485E+01;
+    x[1] =  - 0.278329009978165177083671870152E+01;
+    x[2] =  - 0.202594801582575533516591283121E+01;
+    x[3] =  - 0.132655708449493285594973473558E+01;
+    x[4] =  - 0.656809566882099765024611575383E+00;
+    x[5] =    0.0E+00;
+    x[6] =    0.656809566882099765024611575383E+00;
+    x[7] =    0.132655708449493285594973473558E+01;
+    x[8] =    0.202594801582575533516591283121E+01;
+    x[9] =    0.278329009978165177083671870152E+01;
+    x[10] =   0.366847084655958251845837146485E+01;
+  }
+  else if ( n == 12 )
+  {
+    x[0] =  - 0.388972489786978191927164274724E+01;
+    x[1] =  - 0.302063702512088977171067937518E+01;
+    x[2] =  - 0.227950708050105990018772856942E+01;
+    x[3] =  - 0.159768263515260479670966277090E+01;
+    x[4] =  - 0.947788391240163743704578131060E+00;
+    x[5] =  - 0.314240376254359111276611634095E+00;
+    x[6] =    0.314240376254359111276611634095E+00;
+    x[7] =    0.947788391240163743704578131060E+00;
+    x[8] =    0.159768263515260479670966277090E+01;
+    x[9] =    0.227950708050105990018772856942E+01;
+    x[10] =   0.302063702512088977171067937518E+01;
+    x[11] =   0.388972489786978191927164274724E+01;
+  }
+  else if ( n == 13 )
+  {
+    x[0] =  - 0.410133759617863964117891508007E+01;
+    x[1] =  - 0.324660897837240998812205115236E+01;
+    x[2] =  - 0.251973568567823788343040913628E+01;
+    x[3] =  - 0.185310765160151214200350644316E+01;
+    x[4] =  - 0.122005503659074842622205526637E+01;
+    x[5] =  - 0.605763879171060113080537108602E+00;
+    x[6] =    0.0E+00;
+    x[7] =    0.605763879171060113080537108602E+00;
+    x[8] =    0.122005503659074842622205526637E+01;
+    x[9] =    0.185310765160151214200350644316E+01;
+    x[10] =   0.251973568567823788343040913628E+01;
+    x[11] =   0.324660897837240998812205115236E+01;
+    x[12] =   0.410133759617863964117891508007E+01;
+  }
+  else if ( n == 14 )
+  {
+    x[0] =  - 0.430444857047363181262129810037E+01;
+    x[1] =  - 0.346265693360227055020891736115E+01;
+    x[2] =  - 0.274847072498540256862499852415E+01;
+    x[3] =  - 0.209518325850771681573497272630E+01;
+    x[4] =  - 0.147668273114114087058350654421E+01;
+    x[5] =  - 0.878713787329399416114679311861E+00;
+    x[6] =  - 0.291745510672562078446113075799E+00;
+    x[7] =    0.291745510672562078446113075799E+00;
+    x[8] =    0.878713787329399416114679311861E+00;
+    x[9] =    0.147668273114114087058350654421E+01;
+    x[10] =   0.209518325850771681573497272630E+01;
+    x[11] =   0.274847072498540256862499852415E+01;
+    x[12] =   0.346265693360227055020891736115E+01;
+    x[13] =   0.430444857047363181262129810037E+01;
+  }
+  else if ( n == 15 )
+  {
+    x[0] =  - 0.449999070730939155366438053053E+01;
+    x[1] =  - 0.366995037340445253472922383312E+01;
+    x[2] =  - 0.296716692790560324848896036355E+01;
+    x[3] =  - 0.232573248617385774545404479449E+01;
+    x[4] =  - 0.171999257518648893241583152515E+01;
+    x[5] =  - 0.113611558521092066631913490556E+01;
+    x[6] =  - 0.565069583255575748526020337198E+00;
+    x[7] =    0.0E+00;
+    x[8] =    0.565069583255575748526020337198E+00;
+    x[9] =    0.113611558521092066631913490556E+01;
+    x[10] =   0.171999257518648893241583152515E+01;
+    x[11] =   0.232573248617385774545404479449E+01;
+    x[12] =   0.296716692790560324848896036355E+01;
+    x[13] =   0.366995037340445253472922383312E+01;
+    x[14] =   0.449999070730939155366438053053E+01;
+  }
+  else if ( n == 16 )
+  {
+    x[0] =  - 0.468873893930581836468849864875E+01;
+    x[1] =  - 0.386944790486012269871942409801E+01;
+    x[2] =  - 0.317699916197995602681399455926E+01;
+    x[3] =  - 0.254620215784748136215932870545E+01;
+    x[4] =  - 0.195178799091625397743465541496E+01;
+    x[5] =  - 0.138025853919888079637208966969E+01;
+    x[6] =  - 0.822951449144655892582454496734E+00;
+    x[7] =  - 0.273481046138152452158280401965E+00;
+    x[8] =    0.273481046138152452158280401965E+00;
+    x[9] =    0.822951449144655892582454496734E+00;
+    x[10] =   0.138025853919888079637208966969E+01;
+    x[11] =   0.195178799091625397743465541496E+01;
+    x[12] =   0.254620215784748136215932870545E+01;
+    x[13] =   0.317699916197995602681399455926E+01;
+    x[14] =   0.386944790486012269871942409801E+01;
+    x[15] =   0.468873893930581836468849864875E+01;
+  }
+  else if ( n == 17 )
+  {
+    x[0] =  - 0.487134519367440308834927655662E+01;
+    x[1] =  - 0.406194667587547430689245559698E+01;
+    x[2] =  - 0.337893209114149408338327069289E+01;
+    x[3] =  - 0.275776291570388873092640349574E+01;
+    x[4] =  - 0.217350282666662081927537907149E+01;
+    x[5] =  - 0.161292431422123133311288254454E+01;
+    x[6] =  - 0.106764872574345055363045773799E+01;
+    x[7] =  - 0.531633001342654731349086553718E+00;
+    x[8] =    0.0E+00;
+    x[9] =    0.531633001342654731349086553718E+00;
+    x[10] =   0.106764872574345055363045773799E+01;
+    x[11] =   0.161292431422123133311288254454E+01;
+    x[12] =   0.217350282666662081927537907149E+01;
+    x[13] =   0.275776291570388873092640349574E+01;
+    x[14] =   0.337893209114149408338327069289E+01;
+    x[15] =   0.406194667587547430689245559698E+01;
+    x[16] =   0.487134519367440308834927655662E+01;
+  }
+  else if ( n == 18 )
+  {
+    x[0] =  - 0.504836400887446676837203757885E+01;
+    x[1] =  - 0.424811787356812646302342016090E+01;
+    x[2] =  - 0.357376906848626607950067599377E+01;
+    x[3] =  - 0.296137750553160684477863254906E+01;
+    x[4] =  - 0.238629908916668600026459301424E+01;
+    x[5] =  - 0.183553160426162889225383944409E+01;
+    x[6] =  - 0.130092085838961736566626555439E+01;
+    x[7] =  - 0.776682919267411661316659462284E+00;
+    x[8] =  - 0.258267750519096759258116098711E+00;
+    x[9] =    0.258267750519096759258116098711E+00;
+    x[10] =   0.776682919267411661316659462284E+00;
+    x[11] =   0.130092085838961736566626555439E+01;
+    x[12] =   0.183553160426162889225383944409E+01;
+    x[13] =   0.238629908916668600026459301424E+01;
+    x[14] =   0.296137750553160684477863254906E+01;
+    x[15] =   0.357376906848626607950067599377E+01;
+    x[16] =   0.424811787356812646302342016090E+01;
+    x[17] =   0.504836400887446676837203757885E+01;
+  }
+  else if ( n == 19 )
+  {
+    x[0] =  - 0.522027169053748216460967142500E+01;
+    x[1] =  - 0.442853280660377943723498532226E+01;
+    x[2] =  - 0.376218735196402009751489394104E+01;
+    x[3] =  - 0.315784881834760228184318034120E+01;
+    x[4] =  - 0.259113378979454256492128084112E+01;
+    x[5] =  - 0.204923170985061937575050838669E+01;
+    x[6] =  - 0.152417061939353303183354859367E+01;
+    x[7] =  - 0.101036838713431135136859873726E+01;
+    x[8] =  - 0.503520163423888209373811765050E+00;
+    x[9] =    0.0E+00;
+    x[10] =   0.503520163423888209373811765050E+00;
+    x[11] =   0.101036838713431135136859873726E+01;
+    x[12] =   0.152417061939353303183354859367E+01;
+    x[13] =   0.204923170985061937575050838669E+01;
+    x[14] =   0.259113378979454256492128084112E+01;
+    x[15] =   0.315784881834760228184318034120E+01;
+    x[16] =   0.376218735196402009751489394104E+01;
+    x[17] =   0.442853280660377943723498532226E+01;
+    x[18] =   0.522027169053748216460967142500E+01;
+  }
+  else if ( n == 20 )
+  {
+    x[0] =  - 0.538748089001123286201690041068E+01;
+    x[1] =  - 0.460368244955074427307767524898E+01;
+    x[2] =  - 0.394476404011562521037562880052E+01;
+    x[3] =  - 0.334785456738321632691492452300E+01;
+    x[4] =  - 0.278880605842813048052503375640E+01;
+    x[5] =  - 0.225497400208927552308233334473E+01;
+    x[6] =  - 0.173853771211658620678086566214E+01;
+    x[7] =  - 0.123407621539532300788581834696E+01;
+    x[8] =  - 0.737473728545394358705605144252E+00;
+    x[9] =  - 0.245340708300901249903836530634E+00;
+    x[10] =   0.245340708300901249903836530634E+00;
+    x[11] =   0.737473728545394358705605144252E+00;
+    x[12] =   0.123407621539532300788581834696E+01;
+    x[13] =   0.173853771211658620678086566214E+01;
+    x[14] =   0.225497400208927552308233334473E+01;
+    x[15] =   0.278880605842813048052503375640E+01;
+    x[16] =   0.334785456738321632691492452300E+01;
+    x[17] =   0.394476404011562521037562880052E+01;
+    x[18] =   0.460368244955074427307767524898E+01;
+    x[19] =   0.538748089001123286201690041068E+01;
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "HERMITE_LOOKUP_POINTS - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::cerr << "  Legal values are 1 through 20.\n";
+    std::exit ( 1 );
+  }
+
+  return;
+}
+//****************************************************************************80
+
void hermite_lookup_weights ( int n, double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    HERMITE_LOOKUP_WEIGHTS looks up weights for Hermite quadrature.
//
//  Discussion:
//
//    The integral:
//
//      integral ( -oo < x < +oo ) exp ( - x * x ) * f(x) dx
//
//    The quadrature rule:
//
//      sum ( 1 <= i <= n ) w(i) * f ( x(i) ).
//
//    Mathematica can numerically estimate the abscissas
//    of order N to P digits by the command:
//
//      NSolve [ HermiteH [ n, x ] == 0, x, p ]
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    27 April 2010
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Milton Abramowitz, Irene Stegun,
//    Handbook of Mathematical Functions,
//    National Bureau of Standards, 1964,
//    ISBN: 0-486-61272-4,
//    LC: QA47.A34.
//
//    Vladimir Krylov,
//    Approximate Calculation of Integrals,
//    Dover, 2006,
//    ISBN: 0486445798,
//    LC: QA311.K713.
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//    Stephen Wolfram,
//    The Mathematica Book,
//    Fourth Edition,
//    Cambridge University Press, 1999,
//    ISBN: 0-521-64314-7,
//    LC: QA76.95.W65.
//
//    Daniel Zwillinger, editor,
//    CRC Standard Mathematical Tables and Formulae,
//    30th Edition,
//    CRC Press, 1996,
//    ISBN: 0-8493-2479-3,
//    LC: QA47.M315.
//
//  Parameters:
//
//    Input, int N, the order.
//    N must be between 1 and 20.
//
//    Output, double W[N], the weights.
//
{
//
//  Each tabulated rule is symmetric ( w[i] == w[n-1-i] ), and its weights
//  sum to sqrt(pi) = 1.77245..., the integral of exp(-x*x) over the real
//  line (visible directly in the n == 1 case below).
//
  if ( n == 1 )
  {
    w[0] = 1.77245385090551602729816748334;
  }
  else if ( n == 2 )
  {
    w[0] = 0.886226925452758013649083741671E+00;
    w[1] = 0.886226925452758013649083741671E+00;
  }
  else if ( n == 3 )
  {
    w[0] = 0.295408975150919337883027913890E+00;
    w[1] = 0.118163590060367735153211165556E+01;
    w[2] = 0.295408975150919337883027913890E+00;
  }
  else if ( n == 4 )
  {
    w[0] = 0.813128354472451771430345571899E-01;
    w[1] = 0.804914090005512836506049184481E+00;
    w[2] = 0.804914090005512836506049184481E+00;
    w[3] = 0.813128354472451771430345571899E-01;
  }
  else if ( n == 5 )
  {
    w[0] = 0.199532420590459132077434585942E-01;
    w[1] = 0.393619323152241159828495620852E+00;
    w[2] = 0.945308720482941881225689324449E+00;
    w[3] = 0.393619323152241159828495620852E+00;
    w[4] = 0.199532420590459132077434585942E-01;
  }
  else if ( n == 6 )
  {
    w[0] = 0.453000990550884564085747256463E-02;
    w[1] = 0.157067320322856643916311563508E+00;
    w[2] = 0.724629595224392524091914705598E+00;
    w[3] = 0.724629595224392524091914705598E+00;
    w[4] = 0.157067320322856643916311563508E+00;
    w[5] = 0.453000990550884564085747256463E-02;
  }
  else if ( n == 7 )
  {
    w[0] = 0.971781245099519154149424255939E-03;
    w[1] = 0.545155828191270305921785688417E-01;
    w[2] = 0.425607252610127800520317466666E+00;
    w[3] = 0.810264617556807326764876563813E+00;
    w[4] = 0.425607252610127800520317466666E+00;
    w[5] = 0.545155828191270305921785688417E-01;
    w[6] = 0.971781245099519154149424255939E-03;
  }
  else if ( n == 8 )
  {
    w[0] = 0.199604072211367619206090452544E-03;
    w[1] = 0.170779830074134754562030564364E-01;
    w[2] = 0.207802325814891879543258620286E+00;
    w[3] = 0.661147012558241291030415974496E+00;
    w[4] = 0.661147012558241291030415974496E+00;
    w[5] = 0.207802325814891879543258620286E+00;
    w[6] = 0.170779830074134754562030564364E-01;
    w[7] = 0.199604072211367619206090452544E-03;
  }
  else if ( n == 9 )
  {
    w[0] = 0.396069772632643819045862946425E-04;
    w[1] = 0.494362427553694721722456597763E-02;
    w[2] = 0.884745273943765732879751147476E-01;
    w[3] = 0.432651559002555750199812112956E+00;
    w[4] = 0.720235215606050957124334723389E+00;
    w[5] = 0.432651559002555750199812112956E+00;
    w[6] = 0.884745273943765732879751147476E-01;
    w[7] = 0.494362427553694721722456597763E-02;
    w[8] = 0.396069772632643819045862946425E-04;
  }
  else if ( n == 10 )
  {
    w[0] =  0.764043285523262062915936785960E-05;
    w[1] =  0.134364574678123269220156558585E-02;
    w[2] =  0.338743944554810631361647312776E-01;
    w[3] =  0.240138611082314686416523295006E+00;
    w[4] =  0.610862633735325798783564990433E+00;
    w[5] =  0.610862633735325798783564990433E+00;
    w[6] =  0.240138611082314686416523295006E+00;
    w[7] =  0.338743944554810631361647312776E-01;
    w[8] =  0.134364574678123269220156558585E-02;
    w[9] =  0.764043285523262062915936785960E-05;
  }
  else if ( n == 11 )
  {
    w[0] =  0.143956039371425822033088366032E-05;
    w[1] =  0.346819466323345510643413772940E-03;
    w[2] =  0.119113954449115324503874202916E-01;
    w[3] =  0.117227875167708503381788649308E+00;
    w[4] =  0.429359752356125028446073598601E+00;
    w[5] =  0.654759286914591779203940657627E+00;
    w[6] =  0.429359752356125028446073598601E+00;
    w[7] =  0.117227875167708503381788649308E+00;
    w[8] =  0.119113954449115324503874202916E-01;
    w[9] =  0.346819466323345510643413772940E-03;
    w[10] = 0.143956039371425822033088366032E-05;
  }
  else if ( n == 12 )
  {
    w[0] =  0.265855168435630160602311400877E-06;
    w[1] =  0.857368704358785865456906323153E-04;
    w[2] =  0.390539058462906185999438432620E-02;
    w[3] =  0.516079856158839299918734423606E-01;
    w[4] =  0.260492310264161129233396139765E+00;
    w[5] =  0.570135236262479578347113482275E+00;
    w[6] =  0.570135236262479578347113482275E+00;
    w[7] =  0.260492310264161129233396139765E+00;
    w[8] =  0.516079856158839299918734423606E-01;
    w[9] =  0.390539058462906185999438432620E-02;
    w[10] = 0.857368704358785865456906323153E-04;
    w[11] = 0.265855168435630160602311400877E-06;
  }
  else if ( n == 13 )
  {
    w[0] =  0.482573185007313108834997332342E-07;
    w[1] =  0.204303604027070731248669432937E-04;
    w[2] =  0.120745999271938594730924899224E-02;
    w[3] =  0.208627752961699392166033805050E-01;
    w[4] =  0.140323320687023437762792268873E+00;
    w[5] =  0.421616296898543221746893558568E+00;
    w[6] =  0.604393187921161642342099068579E+00;
    w[7] =  0.421616296898543221746893558568E+00;
    w[8] =  0.140323320687023437762792268873E+00;
    w[9] =  0.208627752961699392166033805050E-01;
    w[10] = 0.120745999271938594730924899224E-02;
    w[11] = 0.204303604027070731248669432937E-04;
    w[12] = 0.482573185007313108834997332342E-07;
  }
  else if ( n == 14 )
  {
    w[0] =  0.862859116812515794532041783429E-08;
    w[1] =  0.471648435501891674887688950105E-05;
    w[2] =  0.355092613551923610483661076691E-03;
    w[3] =  0.785005472645794431048644334608E-02;
    w[4] =  0.685055342234652055387163312367E-01;
    w[5] =  0.273105609064246603352569187026E+00;
    w[6] =  0.536405909712090149794921296776E+00;
    w[7] =  0.536405909712090149794921296776E+00;
    w[8] =  0.273105609064246603352569187026E+00;
    w[9] =  0.685055342234652055387163312367E-01;
    w[10] = 0.785005472645794431048644334608E-02;
    w[11] = 0.355092613551923610483661076691E-03;
    w[12] = 0.471648435501891674887688950105E-05;
    w[13] = 0.862859116812515794532041783429E-08;
  }
  else if ( n == 15 )
  {
    w[0] =  0.152247580425351702016062666965E-08;
    w[1] =  0.105911554771106663577520791055E-05;
    w[2] =  0.100004441232499868127296736177E-03;
    w[3] =  0.277806884291277589607887049229E-02;
    w[4] =  0.307800338725460822286814158758E-01;
    w[5] =  0.158488915795935746883839384960E+00;
    w[6] =  0.412028687498898627025891079568E+00;
    w[7] =  0.564100308726417532852625797340E+00;
    w[8] =  0.412028687498898627025891079568E+00;
    w[9] =  0.158488915795935746883839384960E+00;
    w[10] = 0.307800338725460822286814158758E-01;
    w[11] = 0.277806884291277589607887049229E-02;
    w[12] = 0.100004441232499868127296736177E-03;
    w[13] = 0.105911554771106663577520791055E-05;
    w[14] = 0.152247580425351702016062666965E-08;
  }
  else if ( n == 16 )
  {
    w[0] =  0.265480747401118224470926366050E-09;
    w[1] =  0.232098084486521065338749423185E-06;
    w[2] =  0.271186009253788151201891432244E-04;
    w[3] =  0.932284008624180529914277305537E-03;
    w[4] =  0.128803115355099736834642999312E-01;
    w[5] =  0.838100413989858294154207349001E-01;
    w[6] =  0.280647458528533675369463335380E+00;
    w[7] =  0.507929479016613741913517341791E+00;
    w[8] =  0.507929479016613741913517341791E+00;
    w[9] =  0.280647458528533675369463335380E+00;
    w[10] = 0.838100413989858294154207349001E-01;
    w[11] = 0.128803115355099736834642999312E-01;
    w[12] = 0.932284008624180529914277305537E-03;
    w[13] = 0.271186009253788151201891432244E-04;
    w[14] = 0.232098084486521065338749423185E-06;
    w[15] = 0.265480747401118224470926366050E-09;
  }
  else if ( n == 17 )
  {
    w[0] =  0.458057893079863330580889281222E-10;
    w[1] =  0.497707898163079405227863353715E-07;
    w[2] =  0.711228914002130958353327376218E-05;
    w[3] =  0.298643286697753041151336643059E-03;
    w[4] =  0.506734995762753791170069495879E-02;
    w[5] =  0.409200341495762798094994877854E-01;
    w[6] =  0.172648297670097079217645196219E+00;
    w[7] =  0.401826469470411956577635085257E+00;
    w[8] =  0.530917937624863560331883103379E+00;
    w[9] =  0.401826469470411956577635085257E+00;
    w[10] = 0.172648297670097079217645196219E+00;
    w[11] = 0.409200341495762798094994877854E-01;
    w[12] = 0.506734995762753791170069495879E-02;
    w[13] = 0.298643286697753041151336643059E-03;
    w[14] = 0.711228914002130958353327376218E-05;
    w[15] = 0.497707898163079405227863353715E-07;
    w[16] = 0.458057893079863330580889281222E-10;
  }
  else if ( n == 18 )
  {
    w[0] =  0.782819977211589102925147471012E-11;
    w[1] =  0.104672057957920824443559608435E-07;
    w[2] =  0.181065448109343040959702385911E-05;
    w[3] =  0.918112686792940352914675407371E-04;
    w[4] =  0.188852263026841789438175325426E-02;
    w[5] =  0.186400423875446519219315221973E-01;
    w[6] =  0.973017476413154293308537234155E-01;
    w[7] =  0.284807285669979578595606820713E+00;
    w[8] =  0.483495694725455552876410522141E+00;
    w[9] =  0.483495694725455552876410522141E+00;
    w[10] = 0.284807285669979578595606820713E+00;
    w[11] = 0.973017476413154293308537234155E-01;
    w[12] = 0.186400423875446519219315221973E-01;
    w[13] = 0.188852263026841789438175325426E-02;
    w[14] = 0.918112686792940352914675407371E-04;
    w[15] = 0.181065448109343040959702385911E-05;
    w[16] = 0.104672057957920824443559608435E-07;
    w[17] = 0.782819977211589102925147471012E-11;
  }
  else if ( n == 19 )
  {
    w[0] =  0.132629709449851575185289154385E-11;
    w[1] =  0.216305100986355475019693077221E-08;
    w[2] =  0.448824314722312295179447915594E-06;
    w[3] =  0.272091977631616257711941025214E-04;
    w[4] =  0.670877521407181106194696282100E-03;
    w[5] =  0.798886677772299020922211491861E-02;
    w[6] =  0.508103869090520673569908110358E-01;
    w[7] =  0.183632701306997074156148485766E+00;
    w[8] =  0.391608988613030244504042313621E+00;
    w[9] =  0.502974888276186530840731361096E+00;
    w[10] = 0.391608988613030244504042313621E+00;
    w[11] = 0.183632701306997074156148485766E+00;
    w[12] = 0.508103869090520673569908110358E-01;
    w[13] = 0.798886677772299020922211491861E-02;
    w[14] = 0.670877521407181106194696282100E-03;
    w[15] = 0.272091977631616257711941025214E-04;
    w[16] = 0.448824314722312295179447915594E-06;
    w[17] = 0.216305100986355475019693077221E-08;
    w[18] = 0.132629709449851575185289154385E-11;
  }
  else if ( n == 20 )
  {
    w[0] =  0.222939364553415129252250061603E-12;
    w[1] =  0.439934099227318055362885145547E-09;
    w[2] =  0.108606937076928169399952456345E-06;
    w[3] =  0.780255647853206369414599199965E-05;
    w[4] =  0.228338636016353967257145917963E-03;
    w[5] =  0.324377334223786183218324713235E-02;
    w[6] =  0.248105208874636108821649525589E-01;
    w[7] =  0.109017206020023320013755033535E+00;
    w[8] =  0.286675505362834129719659706228E+00;
    w[9] =  0.462243669600610089650328639861E+00;
    w[10] = 0.462243669600610089650328639861E+00;
    w[11] = 0.286675505362834129719659706228E+00;
    w[12] = 0.109017206020023320013755033535E+00;
    w[13] = 0.248105208874636108821649525589E-01;
    w[14] = 0.324377334223786183218324713235E-02;
    w[15] = 0.228338636016353967257145917963E-03;
    w[16] = 0.780255647853206369414599199965E-05;
    w[17] = 0.108606937076928169399952456345E-06;
    w[18] = 0.439934099227318055362885145547E-09;
    w[19] = 0.222939364553415129252250061603E-12;
  }
  else
  {
//
//  No rule is tabulated outside 1 <= N <= 20: fail hard rather than
//  leave W undefined.
//
    std::cerr << "\n";
    std::cerr << "HERMITE_LOOKUP_WEIGHTS - Fatal error!\n";
    std::cerr << "  Illegal value of N = " << n << "\n";
    std::cerr << "  Legal values are 1 through 20.\n";
    std::exit ( 1 );
  }

  return;
}
+//****************************************************************************80
+
void hermite_ss_compute ( int order, double x[], double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    HERMITE_SS_COMPUTE computes a Hermite quadrature rule.
//
//  Discussion:
//
//    The abscissas are the zeros of the N-th order Hermite polynomial.
//
//    The integral:
//
//      Integral ( -oo < X < +oo ) exp ( - X * X ) * F(X) dX
//
//    The quadrature rule:
//
//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    19 April 2011
//
//  Author:
//
//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//  Parameters:
//
//    Input, int ORDER, the order.
//    1 <= ORDER.
//
//    Output, double X[ORDER], the abscissas.
//
//    Output, double W[ORDER], the weights.
//
{
  double cc;    // normalizing constant of the weight formula
  double dp2;   // H'(ORDER) at the current root, set by hermite_ss_root
  int i;
  double p1;    // H(ORDER-1) at the current root, set by hermite_ss_root
  double s;     // scale factor for the asymptotic root estimates
  double temp;
  double x0;    // current root estimate, refined across iterations

  if ( order < 1 )
  {
    std::cerr << "\n";
    std::cerr << "HERMITE_SS_COMPUTE - Fatal error!\n";
    std::cerr << "  Illegal value of ORDER = " << order << "\n";
    std::exit ( 1 );
  }
//
//  cc = sqrt(pi) * Gamma(ORDER) / 2^(ORDER-1); the literal 1.7724538509
//  is sqrt(pi) truncated to 11 digits.
//
  cc = 1.7724538509 * webbur::r8_gamma ( ( double ) ( order ) )
    / std::pow ( 2.0, order - 1 );

  s = std::pow ( 2.0 * ( double ) ( order ) + 1.0, 1.0 / 6.0 );
//
//  Find the ORDER/2 (rounded up) nonnegative roots, largest first.
//  The first two guesses are asymptotic estimates (Stroud-Secrest);
//  later guesses extrapolate from the roots already found.
//
  for ( i = 0; i < ( order + 1 ) / 2; i++ )
  {
    if ( i == 0 )
    {
      x0 = s * s * s - 1.85575 / s;
    }
    else if ( i == 1 )
    {
      // refines the previous x0 carried over from the i == 0 pass
      x0 = x0 - 1.14 * std::pow ( ( double ) ( order ), 0.426 ) / x0;
    }
    else if ( i == 2 )
    {
      x0 = 1.86 * x0 - 0.86 * x[0];
    }
    else if ( i == 3 )
    {
      x0 = 1.91 * x0 - 0.91 * x[1];
    }
    else
    {
      x0 = 2.0 * x0 - x[i-2];
    }
//
//  Polish the estimate with Newton iteration on the Hermite recurrence.
//
    webbur::hermite_ss_root ( &x0, order, &dp2, &p1 );

    x[i] = x0;
    w[i] = ( cc / dp2 ) / p1;
//
//  The rule is symmetric: mirror the root with an equal weight.
//
    x[order-i-1] = -x0;
    w[order-i-1] = w[i];
  }
//
//  Reverse the order of the abscissas.
//
  for ( i = 1; i <= order/2; i++ )
  {
    temp       = x[i-1];
    x[i-1]     = x[order-i];
    x[order-i] = temp;
  }
//
//  For odd ORDER, force the central abscissa to be exactly zero.
//
  if ( ( order % 2 ) == 1 )
  {
    x[(order-1)/2] = 0.0;
  }

  return;
}
+//****************************************************************************80
+
+void hermite_ss_recur ( double *p2, double *dp2, double *p1, double x, int order )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_SS_RECUR finds the value and derivative of a Hermite polynomial.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Output, double *P2, the value of H(ORDER)(X).
+//
+//    Output, double *DP2, the value of H'(ORDER)(X).
+//
+//    Output, double *P1, the value of H(ORDER-1)(X).
+//
+//    Input, double X, the point at which polynomials are evaluated.
+//
+//    Input, int ORDER, the order of the polynomial.
+//
+{
+  int i;
+  double dq0;
+  double dq1;
+  double dq2;
+  double q0;
+  double q1;
+  double q2;
+
+  q1 = 1.0;
+  dq1 = 0.0;
+
+  q2 = x;
+  dq2 = 1.0;
+
+  for ( i = 2; i <= order; i++ )
+  {
+    q0 = q1;
+    dq0 = dq1;
+
+    q1 = q2;
+    dq1 = dq2;
+
+    q2  = x * q1 - 0.5 * ( ( double ) ( i ) - 1.0 ) * q0;
+    dq2 = x * dq1 + q1 - 0.5 * ( ( double ) ( i ) - 1.0 ) * dq0;
+  }
+
+  *p2 = q2;
+  *dp2 = dq2;
+  *p1 = q1;
+
+  return;
+}
+//****************************************************************************80
+
+void hermite_ss_root ( double *x, int order, double *dp2, double *p1 )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    HERMITE_SS_ROOT improves an approximate root of a Hermite polynomial.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input/output, double *X, the approximate root, which
+//    should be improved on output.
+//
+//    Input, int ORDER, the order of the Hermite polynomial.
+//
+//    Output, double *DP2, the value of H'(ORDER)(X).
+//
+//    Output, double *P1, the value of H(ORDER-1)(X).
+//
+{
+  double d;
+  double eps;
+  double p2;
+  int step;
+  int step_max = 10;
+
+  eps = webbur::r8_epsilon ( );
+
+  for ( step = 1; step <= step_max; step++ )
+  {
+    webbur::hermite_ss_recur ( &p2, dp2, p1, *x, order );
+
+    d = p2 / ( *dp2 );
+    *x = *x - d;
+
+    if ( webbur::r8_abs ( d ) <= eps * ( webbur::r8_abs ( *x ) + 1.0 ) )
+    {
+      return;
+    }
+  }
+  return;
+}
+//****************************************************************************80
+
int i4_choose ( int n, int k )

//****************************************************************************80
//
//  Purpose:
//
//    I4_CHOOSE computes the binomial coefficient C(N,K).
//
//  Discussion:
//
//    C(N,K) = N! / ( K! * (N-K)! ) is computed entirely in integer
//    arithmetic, multiplying and dividing alternately so intermediate
//    values stay small and exact (Wolfson-Wright, CACM Algorithm 160).
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, K, the values of N and K.
//
//    Output, int I4_CHOOSE, the number of combinations of N
//    things taken K at a time.
//
{
  // smaller and larger of K and N-K (symmetry of the binomial coefficient)
  int lo = ( k < n - k ) ? k : n - k;

  if ( lo < 0 )
  {
    return 0;            // out-of-range K: no combinations
  }
  if ( lo == 0 )
  {
    return 1;            // C(N,0) == C(N,N) == 1
  }

  int hi = ( n - k < k ) ? k : n - k;

  // build C(N,K) = (hi+1)/1 * (hi+2)/2 * ... * (hi+lo)/lo,
  // each division being exact at that point
  int value = hi + 1;
  for ( int i = 2; i <= lo; i++ )
  {
    value = ( value * ( hi + i ) ) / i;
  }

  return value;
}
+//****************************************************************************80
+
int i4_log_2 ( int i )

//****************************************************************************80
//
//  Purpose:
//
//    I4_LOG_2 returns the integer part of the logarithm base 2 of an I4.
//
//  Discussion:
//
//    The value is floor( log2( |I| ) ), with the conventions that both
//    I = 0 and I = 1 give 0.  I4_LOG_2(I) + 1 is the number of binary
//    digits in |I|.
//
//    Examples:  2 -> 1,  3 -> 1,  4 -> 2,  1000 -> 9,  1024 -> 10.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int I, the number whose logarithm base 2 is desired.
//
//    Output, int I4_LOG_2, the integer part of the logarithm base 2 of
//    the absolute value of I.
//
{
  int magnitude = std::abs ( i );
  int log2 = 0;

  // count how many powers of two fit below or at |i|
  for ( int power = 2; power <= magnitude; power = power * 2 )
  {
    log2 = log2 + 1;
  }

  return log2;
}
+//****************************************************************************80
+
int i4_max ( int i1, int i2 )

//****************************************************************************80
//
//  Purpose:
//
//    I4_MAX returns the maximum of two I4's.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int I1, I2, are two integers to be compared.
//
//    Output, int I4_MAX, the larger of I1 and I2.
//
{
  return ( i2 < i1 ) ? i1 : i2;
}
+//****************************************************************************80
+
int i4_min ( int i1, int i2 )

//****************************************************************************80
//
//  Purpose:
//
//    I4_MIN returns the minimum of two I4's.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int I1, I2, two integers to be compared.
//
//    Output, int I4_MIN, the smaller of I1 and I2.
//
{
  return ( i1 < i2 ) ? i1 : i2;
}
+//****************************************************************************80
+
int i4_power ( int i, int j )

//****************************************************************************80
//
//  Purpose:
//
//    I4_POWER returns the value of I^J.
//
//  Discussion:
//
//    Negative exponents yield the integer truncation of 1/I^|J|: 1 for
//    I = 1, 0 for |I| > 1.  The indeterminate cases 0^0 and 0^negative
//    are fatal errors.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int I, J, the base and the power.  J should be nonnegative.
//
//    Output, int I4_POWER, the value of I^J.
//
{
  if ( j < 0 )
  {
    if ( i == 1 )
    {
      return 1;
    }
    if ( i == 0 )
    {
      std::cerr << "\n";
      std::cerr << "I4_POWER - Fatal error!\n";
      std::cerr << "  I^J requested, with I = 0 and J negative.\n";
      std::exit ( 1 );
    }
    return 0;          // |i| > 1: integer reciprocal truncates to zero
  }

  if ( j == 0 )
  {
    if ( i == 0 )
    {
      std::cerr << "\n";
      std::cerr << "I4_POWER - Fatal error!\n";
      std::cerr << "  I^J requested, with I = 0 and J = 0.\n";
      std::exit ( 1 );
    }
    return 1;
  }

  if ( j == 1 )
  {
    return i;
  }

  // repeated multiplication for j >= 2
  int value = 1;
  for ( int k = 1; k <= j; k++ )
  {
    value = value * i;
  }
  return value;
}
+//****************************************************************************80
+
void i4mat_copy ( int m, int n, int a1[], int a2[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4MAT_COPY copies one I4MAT to another.
//
//  Discussion:
//
//    An I4MAT is an MxN array of I4's, stored by (I,J) -> [I+J*M].
//    Because the storage is contiguous, the copy is a single pass over
//    all M*N entries.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int M, N, the number of rows and columns.
//
//    Input, int A1[M*N], the matrix to be copied.
//
//    Output, int A2[M*N], the copy of A1.
//
{
  int total = m * n;

  for ( int k = 0; k < total; k++ )
  {
    a2[k] = a1[k];
  }
}
+//****************************************************************************80
+
int *i4mat_copy_new ( int m, int n, int a1[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4MAT_COPY_NEW copies an I4MAT to a "new" I4MAT.
//
//  Discussion:
//
//    An I4MAT is an MxN array of I4's, stored by (I,J) -> [I+J*M].
//    A fresh array is allocated with new[]; the caller owns it and must
//    release it with delete [].
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int M, N, the number of rows and columns.
//
//    Input, int A1[M*N], the matrix to be copied.
//
//    Output, int I4MAT_COPY_NEW[M*N], the copy of A1.
//
{
  int total = m * n;
  int *a2 = new int[total];

  for ( int k = 0; k < total; k++ )
  {
    a2[k] = a1[k];
  }

  return a2;
}
+//****************************************************************************80
+
void i4mat_transpose_print ( int m, int n, int a[], std::string title )

//****************************************************************************80
//
//  Purpose:
//
//    I4MAT_TRANSPOSE_PRINT prints an I4MAT, transposed.
//
//  Discussion:
//
//    An I4MAT is an MxN array of I4's, stored by (I,J) -> [I+J*M].
//
//    This is a convenience wrapper: it prints the entire matrix by
//    delegating to I4MAT_TRANSPOSE_PRINT_SOME with the full (1-based)
//    index range.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    31 January 2005
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int M, the number of rows in A.
//
//    Input, int N, the number of columns in A.
//
//    Input, int A[M*N], the M by N matrix.
//
//    Input, string TITLE, a title.
//
{
  i4mat_transpose_print_some ( m, n, a, 1, 1, m, n, title );

  return;
}
+//****************************************************************************80
+
void i4mat_transpose_print_some ( int m, int n, int a[], int ilo, int jlo,
  int ihi, int jhi, std::string title )

//****************************************************************************80
//
//  Purpose:
//
//    I4MAT_TRANSPOSE_PRINT_SOME prints some of an I4MAT, transposed.
//
//  Discussion:
//
//    An I4MAT is an MxN array of I4's, stored by (I,J) -> [I+J*M].
//
//    The requested index range ILO:IHI, JLO:JHI is 1-based, but the row
//    and column labels written to the output are 0-based (I-1, J-1).
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    14 June 2005
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int M, the number of rows of the matrix.
//    M must be positive.
//
//    Input, int N, the number of columns of the matrix.
//    N must be positive.
//
//    Input, int A[M*N], the matrix.
//
//    Input, int ILO, JLO, IHI, JHI, designate the first row and
//    column, and the last row and column to be printed.
//
//    Input, string TITLE, a title.
//
{
# define INCX 10

  int i;
  int i2hi;        // last row of the current strip
  int i2lo;        // first row of the current strip
  int j;
  int j2hi;        // last column printed, clamped to N
  int j2lo;        // first column printed, clamped to 1
  std::cout << "\n";
  std::cout << title << "\n";
//
//  Print the columns of the matrix, in strips of INCX.
//
  for ( i2lo = ilo; i2lo <= ihi; i2lo = i2lo + INCX )
  {
    // clamp the strip to both the matrix extent and the requested range
    i2hi = i2lo + INCX - 1;
    i2hi = webbur::i4_min ( i2hi, m );
    i2hi = webbur::i4_min ( i2hi, ihi );

    std::cout << "\n";
//
//  For each row I in the current range...
//
//  Write the header.
//
    std::cout << "  Row: ";
    for ( i = i2lo; i <= i2hi; i++ )
    {
      // labels are printed 0-based
      std::cout << std::setw(6) << i - 1 << "  ";
    }
    std::cout << "\n";
    std::cout << "  Col\n";
    std::cout << "\n";
//
//  Determine the range of the rows in this strip.
//
    j2lo = webbur::i4_max ( jlo, 1 );
    j2hi = webbur::i4_min ( jhi, n );

    for ( j = j2lo; j <= j2hi; j++ )
    {
//
//  Print out (up to INCX) entries in column J, that lie in the current strip.
//
      std::cout << std::setw(5) << j - 1 << ":";
      for ( i = i2lo; i <= i2hi; i++ )
      {
        std::cout << std::setw(6) << a[i-1+(j-1)*m] << "  ";
      }
      std::cout << "\n";
    }
  }

  return;
# undef INCX
}
+//****************************************************************************80
+
void i4mat_write ( std::string output_filename, int m, int n, int table[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4MAT_WRITE writes an I4MAT file.
//
//  Discussion:
//
//    Each of the N columns of TABLE becomes one line of the output file,
//    with entries written in a fixed width of 10.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    01 June 2009
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, string OUTPUT_FILENAME, the output filename.
//
//    Input, int M, the spatial dimension.
//
//    Input, int N, the number of points.
//
//    Input, int TABLE[M*N], the table data.
//
{
  int i;
  int j;
//
//  Open the file; on failure, report and bail out (no exception is thrown).
//
  std::ofstream stream ( output_filename.c_str ( ) );

  if ( !stream )
  {
    std::cerr << "\n";
    std::cerr << "I4MAT_WRITE - Fatal error!\n";
    std::cerr << "  Could not open the output file.\n";
    return;
  }
//
//  One line per column J; entries are the M values of that column.
//
  for ( j = 0; j < n; j++ )
  {
    for ( i = 0; i < m; i++ )
    {
      stream << std::setw(10) << table[i+j*m] << "  ";
    }
    stream << "\n";
  }

  stream.close ( );

  return;
}
+//****************************************************************************80
+
int *i4vec_add_new ( int n, int a[], int b[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_ADD_NEW computes C = A + B for I4VEC's.
//
//  Discussion:
//
//    An I4VEC is a vector of I4's.  The result is returned in freshly
//    allocated storage; the caller is responsible for delete [].
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    28 April 2010
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries.
//
//    Input, int A[N], the first vector.
//
//    Input, int B[N], the second vector.
//
//    Output, int I4VEC_ADD_NEW[N], the sum of the vectors.
//
{
  int *sum = new int[n];

  for ( int k = 0; k < n; k++ )
  {
    sum[k] = a[k] + b[k];
  }

  return sum;
}
+//****************************************************************************80
+
bool i4vec_any_lt ( int n, int a[], int b[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_ANY_LT: ( any ( A < B ) ) for I4VEC's.
//
//  Discussion:
//
//    An I4VEC is a vector of I4's.  The comparison is entrywise; the scan
//    stops at the first position where A is smaller.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    28 April 2010
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries.
//
//    Input, int A[N], the first vector.
//
//    Input, int B[N], the second vector.
//
//    Output, bool I4VEC_ANY_LT is TRUE if any entry
//    of A is less than the corresponding entry of B.
//
{
  for ( int k = 0; k < n; k++ )
  {
    if ( a[k] < b[k] )
    {
      return true;
    }
  }

  return false;
}
+//****************************************************************************80
+
void i4vec_copy ( int n, int a1[], int a2[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_COPY copies an I4VEC.
//
//  Discussion:
//
//    An I4VEC is a vector of I4's.  A2 must already provide room for
//    N entries; no allocation is performed here.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    25 April 2007
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vectors.
//
//    Input, int A1[N], the vector to be copied.
//
//    Output, int A2[N], the copy of A1.
//
{
  for ( int k = 0; k < n; k++ )
  {
    a2[k] = a1[k];
  }

  return;
}
+//****************************************************************************80
+
int *i4vec_copy_new ( int n, int a1[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_COPY_NEW copies an I4VEC to a "new" I4VEC.
//
//  Discussion:
//
//    An I4VEC is a vector of I4's.  The copy is returned in freshly
//    allocated storage; the caller is responsible for delete [].
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    04 July 2008
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vectors.
//
//    Input, int A1[N], the vector to be copied.
//
//    Output, int I4VEC_COPY_NEW[N], the copy of A1.
//
{
  int *copy = new int[n];

  for ( int k = 0; k < n; k++ )
  {
    copy[k] = a1[k];
  }

  return copy;
}
+//****************************************************************************80
+
void i4vec_min_mv ( int m, int n, int u[], int v[], int w[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_MIN_MV determines U(1:N) /\ V for vectors U and a single vector V.
//
//  Discussion:
//
//    For two vectors U and V, each of length M, we define
//
//      ( U /\ V ) (I) = min ( U(I), V(I) ).
//
//    Here, each of the N length-M columns of U is combined entrywise
//    with the single vector V.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    12 January 2011
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int M, the dimension of the vectors.
//
//    Input, int N, the number of vectors in U.
//
//    Input, int U[M*N], N vectors, each of length M.
//
//    Input, int V[M], a vector of length M.
//
//    Output, int W[M*N], the value of U /\ V.
//
{
  int i;
  int j;

  for ( j = 0; j < n; j++ )
  {
    for ( i = 0; i < m; i++ )
    {
      // NOTE(review): i4_min is called without the webbur:: qualifier used
      // by the sibling routines in this file — confirm it resolves to the
      // same helper (it does if this code sits inside namespace webbur).
      w[i+j*m] = i4_min ( u[i+j*m], v[i] );
    }
  }
  return;
}
+//****************************************************************************80
+
void i4vec_print ( int n, int a[], std::string title )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_PRINT prints an I4VEC.
//
//  Discussion:
//
//    An I4VEC is a vector of I4's.  Each entry is printed on its own line,
//    preceded by its (0-based) index.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    14 November 2003
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of components of the vector.
//
//    Input, int A[N], the vector to be printed.
//
//    Input, string TITLE, a title.
//
{
  std::cout << "\n";
  std::cout << title << "\n";
  std::cout << "\n";

  for ( int k = 0; k < n; k++ )
  {
    std::cout << "  " << std::setw(8) << k
              << ": " << std::setw(8) << a[k]  << "\n";
  }

  return;
}
+//****************************************************************************80
+
int i4vec_product ( int n, int a[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_PRODUCT multiplies the entries of an I4VEC.
//
//  Discussion:
//
//    An I4VEC is a vector of integer values.  An empty vector yields the
//    empty product, 1.
//
//  Example:
//
//    Input:
//
//      A = ( 1, 2, 3, 4 )
//
//    Output:
//
//      I4VEC_PRODUCT = 24
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    17 May 2003
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vector.
//
//    Input, int A[N], the vector
//
//    Output, int I4VEC_PRODUCT, the product of the entries of A.
//
{
  int result = 1;

  for ( int k = 0; k < n; k++ )
  {
    result = result * a[k];
  }

  return result;
}
+//****************************************************************************80
+
int i4vec_sum ( int n, int a[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_SUM sums the entries of an I4VEC.
//
//  Discussion:
//
//    An I4VEC is a vector of I4's.  An empty vector yields 0.
//
//  Example:
//
//    Input:
//
//      A = ( 1, 2, 3, 4 )
//
//    Output:
//
//      I4VEC_SUM = 10
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    04 June 2009
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vector.
//
//    Input, int A[N], the vector to be summed.
//
//    Output, int I4VEC_SUM, the sum of the entries of A.
//
{
  int total = 0;

  for ( int k = 0; k < n; k++ )
  {
    total = total + a[k];
  }

  return total;
}
+//****************************************************************************80
+
void i4vec_zero ( int n, int a[] )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_ZERO zeroes an I4VEC.
//
//  Discussion:
//
//    An I4VEC is a vector of I4's.  The caller supplies the storage.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    01 August 2005
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vector.
//
//    Output, int A[N], a vector of zeroes.
//
{
  for ( int k = 0; k < n; k++ )
  {
    a[k] = 0;
  }

  return;
}
+//****************************************************************************80
+
int *i4vec_zero_new ( int n )

//****************************************************************************80
//
//  Purpose:
//
//    I4VEC_ZERO_NEW creates and zeroes an I4VEC.
//
//  Discussion:
//
//    An I4VEC is a vector of I4's.  The vector is returned in freshly
//    allocated storage; the caller is responsible for delete [].
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    11 July 2008
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vector.
//
//    Output, int I4VEC_ZERO_NEW[N], a vector of zeroes.
//
{
  int *vec = new int[n];

  for ( int k = 0; k < n; k++ )
  {
    vec[k] = 0;
  }

  return vec;
}
+//****************************************************************************80
+
void imtqlx ( int n, double d[], double e[], double z[] )

//****************************************************************************80
//
//  Purpose:
//
//    IMTQLX diagonalizes a symmetric tridiagonal matrix.
//
//  Discussion:
//
//    This routine is a slightly modified version of the EISPACK routine to
//    perform the implicit QL algorithm on a symmetric tridiagonal matrix.
//
//    The authors thank the authors of EISPACK for permission to use this
//    routine.
//
//    It has been modified to produce the product Q' * Z, where Z is an input
//    vector and Q is the orthogonal matrix diagonalizing the input matrix.
//    The changes consist (essentially) of applying the orthogonal transformations
//    directly to Z as they are generated.
//
//    On return D holds the eigenvalues in ascending order and Z holds Q' * Z.
//    The routine aborts the program (exit(1)) if convergence is not reached
//    within 30 iterations for some eigenvalue.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    08 January 2010
//
//  Author:
//
//    Original FORTRAN77 version by Sylvan Elhay, Jaroslav Kautsky.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Sylvan Elhay, Jaroslav Kautsky,
//    Algorithm 655: IQPACK, FORTRAN Subroutines for the Weights of
//    Interpolatory Quadrature,
//    ACM Transactions on Mathematical Software,
//    Volume 13, Number 4, December 1987, pages 399-415.
//
//    Roger Martin, James Wilkinson,
//    The Implicit QL Algorithm,
//    Numerische Mathematik,
//    Volume 12, Number 5, December 1968, pages 377-383.
//
//  Parameters:
//
//    Input, int N, the order of the matrix.
//
//    Input/output, double D(N), the diagonal entries of the matrix.
//    On output, the information in D has been overwritten.
//
//    Input/output, double E(N), the subdiagonal entries of the
//    matrix, in entries E(1) through E(N-1).  On output, the information in
//    E has been overwritten.
//
//    Input/output, double Z(N).  On input, a vector.  On output,
//    the value of Q' * Z, where Q is the matrix that diagonalizes the
//    input symmetric tridiagonal matrix.
//
{
  double b;
  double c;
  double f;
  double g;
  int i;
  int ii;
  int itn = 30;       // maximum QL iterations allowed per eigenvalue
  int j;
  int k;
  int l;
  int m;
  int mml;
  double p;
  double prec;        // machine epsilon, used in the deflation test
  double r;
  double s;

  prec = webbur::r8_epsilon ( );

  // A 1x1 matrix is already diagonal.
  if ( n == 1 )
  {
    return;
  }

  e[n-1] = 0.0;

  // Outer loop: isolate one eigenvalue at a time, starting at row L.
  for ( l = 1; l <= n; l++ )
  {
    j = 0;
    for ( ; ; )
    {
      // Look for a small subdiagonal element E(M) that splits the matrix;
      // the QL sweep then acts on the block rows L..M.
      for ( m = l; m <= n; m++ )
      {
        if ( m == n )
        {
          break;
        }

        if ( webbur::r8_abs ( e[m-1] ) <=
          prec * ( webbur::r8_abs ( d[m-1] ) + webbur::r8_abs ( d[m] ) ) )
        {
          break;
        }
      }
      p = d[l-1];
      // M == L means the block is 1x1: D(L) has converged.
      if ( m == l )
      {
        break;
      }
      if ( itn <= j )
      {
        std::cerr << "\n";
        std::cerr << "IMTQLX - Fatal error!\n";
        std::cerr << "  Iteration limit exceeded\n";
        std::exit ( 1 );
      }
      j = j + 1;
      // Wilkinson-style shift computed from the leading 2x2 of the block.
      g = ( d[l] - p ) / ( 2.0 * e[l-1] );
      r = std::sqrt ( g * g + 1.0 );
      g = d[m-1] - p + e[l-1] / ( g + webbur::r8_abs ( r ) * webbur::r8_sign ( g ) );
      s = 1.0;
      c = 1.0;
      p = 0.0;
      mml = m - l;

      // One implicit QL sweep: chase the bulge from row M back to row L
      // with Givens rotations, applying each rotation to Z as well.
      for ( ii = 1; ii <= mml; ii++ )
      {
        i = m - ii;
        f = s * e[i-1];
        b = c * e[i-1];

        // Choose the branch that avoids overflow in the rotation setup.
        if ( webbur::r8_abs ( g ) <= webbur::r8_abs ( f ) )
        {
          c = g / f;
          r = std::sqrt ( c * c + 1.0 );
          e[i] = f * r;
          s = 1.0 / r;
          c = c * s;
        }
        else
        {
          s = f / g;
          r = std::sqrt ( s * s + 1.0 );
          e[i] = g * r;
          c = 1.0 / r;
          s = s * c;
        }
        g = d[i] - p;
        r = ( d[i-1] - g ) * s + 2.0 * c * b;
        p = s * r;
        d[i] = g + p;
        g = c * r - b;
        // Accumulate the rotation into Z (this yields Q' * Z on exit).
        f = z[i];
        z[i] = s * z[i-1] + c * f;
        z[i-1] = c * z[i-1] - s * f;
      }
      d[l-1] = d[l-1] - p;
      e[l-1] = g;
      e[m-1] = 0.0;
    }
  }
//
//  Sorting: selection sort D ascending, swapping Z entries in step.
//  NOTE(review): the bound uses M left over from the loops above
//  (normally N at this point) — this matches the IQPACK original.
//
  for ( ii = 2; ii <= m; ii++ )
  {
    i = ii - 1;
    k = i;
    p = d[i-1];

    for ( j = ii; j <= n; j++ )
    {
      if ( d[j-1] < p )
      {
         k = j;
         p = d[j-1];
      }
    }

    if ( k != i )
    {
      d[k-1] = d[i-1];
      d[i-1] = p;
      p = z[i-1];
      z[i-1] = z[k-1];
      z[k-1] = p;
    }
  }
  return;
}
+//****************************************************************************80
+
void jacobi_compute ( int n, double alpha, double beta, double x[],
  double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    JACOBI_COMPUTE: Elhay-Kautsky method for Gauss-Jacobi quadrature rule.
//
//  Discussion:
//
//    The integral:
//
//      Integral ( -1 <= X <= 1 ) (1-X)**ALPHA * (1+X)**BETA * F(X) dX
//
//    The quadrature rule:
//
//      Sum ( 1 <= I <= ORDER ) WEIGHT(I) * F ( XTAB(I) )
//
//    The rule is obtained by building the symmetric tridiagonal Jacobi
//    matrix of the recurrence coefficients and diagonalizing it with
//    IMTQLX: the eigenvalues are the abscissas, and the weights come from
//    the first components of the eigenvectors scaled by the zero-th moment.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    30 April 2011
//
//  Author:
//
//    Original FORTRAN77 version by Sylvan Elhay, Jaroslav Kautsky.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Sylvan Elhay, Jaroslav Kautsky,
//    Algorithm 655: IQPACK, FORTRAN Subroutines for the Weights of
//    Interpolatory Quadrature,
//    ACM Transactions on Mathematical Software,
//    Volume 13, Number 4, December 1987, pages 399-415.
//
//  Parameters:
//
//    Input, int N, the order.
//
//    Input, double ALPHA, BETA, the exponents of (1-X) and
//    (1+X) in the quadrature rule.  For simple Gauss-Legendre quadrature,
//    set ALPHA = BETA = 0.0.  -1.0 < ALPHA and -1.0 < BETA are required.
//
//    Output, double X[N], the abscissas.
//
//    Output, double W[N], the weights.
//
{
  double abi;
  double *bj;       // subdiagonal of the Jacobi matrix (freed before return)
  int i;
  double i_r8;
  double zemu;      // zero-th moment of the weight function
//
//  Define the zero-th moment:
//  zemu = 2^(alpha+beta+1) * Gamma(alpha+1) * Gamma(beta+1) / Gamma(alpha+beta+2).
//
  zemu = std::pow ( 2.0, alpha + beta + 1.0 )
    * webbur::r8_gamma ( alpha + 1.0 )
    * webbur::r8_gamma ( beta + 1.0 )
    / webbur::r8_gamma ( 2.0 + alpha + beta );
//
//  Define the Jacobi matrix: X holds the diagonal, BJ the squared
//  subdiagonal of the three-term recurrence.
//
  bj = new double[n];

  x[0] = ( beta - alpha ) / ( 2.0 + alpha + beta );

  bj[0] = 4.0 * ( 1.0 + alpha ) * ( 1.0 + beta )
    / ( ( 3.0 + alpha + beta )
      * ( 2.0 + alpha + beta ) * ( 2.0 + alpha + beta ) );

  for ( i = 1; i < n; i++ )
  {
    i_r8 = ( double ) ( i + 1 );
    abi = 2.0 * i_r8 + alpha + beta;
    x[i] = ( beta + alpha ) * ( beta - alpha ) / ( ( abi - 2.0 ) * abi );
    bj[i] = 4.0 * i_r8 * ( i_r8 + alpha ) * ( i_r8 + beta )
      * ( i_r8 + alpha + beta )
      / ( ( abi - 1.0 ) * ( abi + 1.0 ) * abi * abi );
  }

  // Take square roots to get the actual subdiagonal entries.
  for ( i = 0; i < n; i++ )
  {
    bj[i] = std::sqrt ( bj[i] );
  }

  // Seed W with sqrt(zemu) e_1; IMTQLX turns it into Q' * W.
  w[0] = std::sqrt ( zemu );

  for ( i = 1; i < n; i++ )
  {
    w[i] = 0.0;
  }
//
//  Diagonalize the Jacobi matrix.  On return X holds the eigenvalues
//  (the abscissas, sorted ascending) and W the rotated seed vector.
//
  webbur::imtqlx ( n, x, bj, w );

  // The weights are the squares of the first eigenvector components.
  for ( i = 0; i < n; i++ )
  {
    w[i] = w[i] * w[i];
  }

  delete [] bj;

  return;
}
+//****************************************************************************80
+
+void jacobi_compute_np ( int order, int np, double p[], double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    JACOBI_COMPUTE_NP computes a Jacobi quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) (1-X)^ALPHA * (1+X)^BETA * F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
+//
+//    Thanks to Xu Xiang of Fudan University for pointing out that
+//    an earlier implementation of this routine was incorrect!
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameter values.
+//    P[0] = ALPHA, the exponent of (1-X)
+//    P[1] = BETA,  the exponent of (1+X).
+//    -1.0 < ALPHA and -1.0 < BETA are required.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double alpha;
+  double beta;
+
+  alpha = p[0];
+  beta = p[1];
+
+  webbur::jacobi_compute ( order, alpha, beta, x, w );
+
+  return;
+}
+//****************************************************************************80
+
+void jacobi_compute_points ( int order, double alpha, double beta,
+  double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    JACOBI_COMPUTE_POINTS computes Jacobi quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    14 October 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, double ALPHA, BETA, the exponents of the (1-X) and (1+X) factors.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double *w;
+
+  w = new double[order];
+
+  webbur::jacobi_compute ( order, alpha, beta, x, w );
+
+  delete [] w;
+
+  return;
+}
+//****************************************************************************80
+
+void jacobi_compute_points_np ( int order, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    JACOBI_COMPUTE_POINTS_NP computes Jacobi quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameter values.
+//    P[0] = ALPHA, the exponent of (1-X)
+//    P[1] = BETA,  the exponent of (1+X).
+//    -1.0 < ALPHA and -1.0 < BETA are required.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double alpha;
+  double beta;
+
+  alpha = p[0];
+  beta = p[1];
+
+  webbur::jacobi_compute_points ( order, alpha, beta, x );
+
+  return;
+}
+//****************************************************************************80
+
+void jacobi_compute_weights ( int order, double alpha, double beta,
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    JACOBI_COMPUTE_WEIGHTS computes Jacobi quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    14 October 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, double ALPHA, BETA, the exponents of the (1-X) and (1+X) factors.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double *x;
+
+  x = new double[order];
+
+  webbur::jacobi_compute ( order, alpha, beta, x, w );
+
+  delete [] x;
+
+  return;
+}
+//****************************************************************************80
+
+void jacobi_compute_weights_np ( int order, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    JACOBI_COMPUTE_WEIGHTS_NP computes Jacobi quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameter values.
+//    P[0] = ALPHA, the exponent of (1-X)
+//    P[1] = BETA,  the exponent of (1+X).
+//    -1.0 < ALPHA and -1.0 < BETA are required.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double alpha;
+  double beta;
+
+  alpha = p[0];
+  beta = p[1];
+
+  webbur::jacobi_compute_weights ( order, alpha, beta, w );
+
+  return;
+}
+//****************************************************************************80
+
+double jacobi_integral ( int expon, double alpha, double beta )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    JACOBI_INTEGRAL integrates a monomial with Jacobi weight.
+//
+//  Discussion:
+//
+//    VALUE = Integral ( -1 <= X <= +1 ) x^EXPON (1-x)^ALPHA (1+x)^BETA dx
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    08 September 2007
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int EXPON, the exponent.
+//
+//    Input, double ALPHA, the exponent of (1-X) in the weight factor.
+//
+//    Input, double BETA, the exponent of (1+X) in the weight factor.
+//
+//    Output, double JACOBI_INTEGRAL, the value of the integral.
+//
+{
+  double arg1;
+  double arg2;
+  double arg3;
+  double arg4;
+  double c;
+  double s;
+  double value;
+  double value1;
+  double value2;
+
+  c = ( double ) ( expon );
+
+  if ( ( expon % 2 ) == 0 )
+  {
+    s = +1.0;
+  }
+  else
+  {
+    s = -1.0;
+  }
+
+  arg1 = - alpha;
+  arg2 =   1.0 + c;
+  arg3 =   2.0 + beta + c;
+  arg4 = - 1.0;
+
+  value1 = webbur::r8_hyper_2f1 ( arg1, arg2, arg3, arg4 );
+
+  arg1 = - beta;
+  arg2 =   1.0 + c;
+  arg3 =   2.0 + alpha + c;
+  arg4 = - 1.0;
+
+  value2 = webbur::r8_hyper_2f1 ( arg1, arg2, arg3, arg4 );
+
+  value = webbur::r8_gamma ( 1.0 + c ) * (
+      s * webbur::r8_gamma ( 1.0 + beta  ) * value1
+    / webbur::r8_gamma ( 2.0 + beta  + c )
+    +     webbur::r8_gamma ( 1.0 + alpha ) * value2
+    / webbur::r8_gamma ( 2.0 + alpha + c ) );
+
+  return value;
+}
+//****************************************************************************80
+
void jacobi_ss_compute ( int order, double alpha, double beta, double x[],
  double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    JACOBI_SS_COMPUTE computes a Jacobi quadrature rule.
//
//  Discussion:
//
//    The integral:
//
//      Integral ( -1 <= X <= 1 ) (1-X)^ALPHA * (1+X)^BETA * F(X) dX
//
//    The quadrature rule:
//
//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
//
//    Thanks to Xu Xiang of Fudan University for pointing out that
//    an earlier implementation of this routine was incorrect!
//
//    Method: each abscissa is found by Newton iteration (JACOBI_SS_ROOT)
//    on the Jacobi polynomial, starting from an asymptotic initial guess;
//    the recursion coefficients B, C of the three-term recurrence are
//    precomputed first.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    18 February 2008
//
//  Author:
//
//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//  Parameters:
//
//    Input, int ORDER, the order.
//    1 <= ORDER.
//
//    Input, double ALPHA, BETA, the exponents of (1-X) and
//    (1+X) in the quadrature rule.  For simple Legendre quadrature,
//    set ALPHA = BETA = 0.0.  -1.0 < ALPHA and -1.0 < BETA are required.
//
//    Output, double X[ORDER], the abscissas.
//
//    Output, double W[ORDER], the weights.
//
{
  double an;
  double *b;        // recursion coefficients B(i) (freed before return)
  double bn;
  double *c;        // recursion coefficients C(i) (freed before return)
  double cc;        // common weight factor
  double delta;     // Gamma-function moment factor
  double dp2;       // derivative value returned by the root finder
  int i;
  double p1;        // lower-order polynomial value from the root finder
  double prod;
  double r1;
  double r2;
  double r3;
  double temp;
  double x0;        // current initial guess / converged root
//
//  Validate ORDER before allocating.
//
  if ( order < 1 )
  {
    std::cerr << "\n";
    std::cerr << "JACOBI_SS_COMPUTE - Fatal error!\n";
    std::cerr << "  Illegal value of ORDER = " << order << "\n";
    std::exit ( 1 );
  }

  b = new double[order];
  c = new double[order];
//
//  Check ALPHA and BETA.
//
  if ( alpha <= -1.0 )
  {
    std::cerr << "\n";
    std::cerr << "JACOBI_SS_COMPUTE - Fatal error!\n";
    std::cerr << "  -1.0 < ALPHA is required.\n";
    std::exit ( 1 );
  }

  if ( beta <= -1.0 )
  {
    std::cerr << "\n";
    std::cerr << "JACOBI_SS_COMPUTE - Fatal error!\n";
    std::cerr << "  -1.0 < BETA is required.\n";
    std::exit ( 1 );
  }
//
//  Set the recursion coefficients of the Jacobi three-term recurrence.
//
  for ( i = 1; i <= order; i++ )
  {
    if ( alpha + beta == 0.0 || beta - alpha == 0.0 )
    {
      b[i-1] = 0.0;
    }
    else
    {
      b[i-1] = ( alpha + beta ) * ( beta - alpha ) /
             ( ( alpha + beta + ( double ) ( 2 * i ) )
             * ( alpha + beta + ( double ) ( 2 * i - 2 ) ) );
    }

    if ( i == 1 )
    {
      c[i-1] = 0.0;
    }
    else
    {
      c[i-1] = 4.0 * ( double ) ( i - 1 )
         * ( alpha + ( double ) ( i - 1 ) )
          * ( beta + ( double ) ( i - 1 ) )
            * ( alpha + beta + ( double ) ( i - 1 ) ) /
            ( ( alpha + beta + ( double ) ( 2 * i - 1 ) )
            * std::pow ( alpha + beta + ( double ) ( 2 * i - 2 ), 2 )
            * ( alpha + beta + ( double ) ( 2 * i - 3 ) ) );
    }
  }
//
//  DELTA is the zero-th moment divided by 2^(alpha+beta+1).
//
  delta = webbur::r8_gamma ( alpha        + 1.0 )
        * webbur::r8_gamma (         beta + 1.0 )
        / webbur::r8_gamma ( alpha + beta + 2.0 );

  prod = 1.0;
  for ( i = 2; i <= order; i++ )
  {
    prod = prod * c[i-1];
  }
  cc = delta * std::pow ( 2.0, alpha + beta + 1.0 ) * prod;
//
//  Find the roots one at a time, largest first.  The initial guesses
//  are the asymptotic formulas of Stroud and Secrest; X0 carries over
//  from one iteration to the next.
//
  for ( i = 1; i <= order; i++ )
  {
    if ( i == 1 )
    {
      // Guess for the largest root.
      an = alpha / ( double ) ( order );
      bn = beta / ( double ) ( order );

      r1 = ( 1.0 + alpha )
        * ( 2.78 / ( 4.0 + ( double ) ( order * order ) )
        + 0.768 * an / ( double ) ( order ) );

      r2 = 1.0 + 1.48 * an + 0.96 * bn
        + 0.452 * an * an + 0.83 * an * bn;

      x0 = ( r2 - r1 ) / r2;
    }
    else if ( i == 2 )
    {
      // Guess for the second root, stepping down from the first.
      r1 = ( 4.1 + alpha ) /
        ( ( 1.0 + alpha ) * ( 1.0 + 0.156 * alpha ) );

      r2 = 1.0 + 0.06 * ( ( double ) ( order ) - 8.0 ) *
        ( 1.0 + 0.12 * alpha ) / ( double ) ( order );

      // NOTE(review): r8_abs is called without the webbur:: qualifier used
      // elsewhere in this file — confirm it resolves to the same helper
      // (it does if this code sits inside namespace webbur).
      r3 = 1.0 + 0.012 * beta *
        ( 1.0 + 0.25 * r8_abs ( alpha ) ) / ( double ) ( order );

      x0 = x0 - r1 * r2 * r3 * ( 1.0 - x0 );
    }
    else if ( i == 3 )
    {
      r1 = ( 1.67 + 0.28 * alpha ) / ( 1.0 + 0.37 * alpha );

      r2 = 1.0 + 0.22 * ( ( double ) ( order ) - 8.0 )
        / ( double ) ( order );

      r3 = 1.0 + 8.0 * beta /
        ( ( 6.28 + beta ) * ( double ) ( order * order ) );

      x0 = x0 - r1 * r2 * r3 * ( x[0] - x0 );
    }
    else if ( i < order - 1 )
    {
      // Middle roots: quadratic extrapolation from the previous three.
      x0 = 3.0 * x[i-2] - 3.0 * x[i-3] + x[i-4];
    }
    else if ( i == order - 1 )
    {
      r1 = ( 1.0 + 0.235 * beta ) / ( 0.766 + 0.119 * beta );

      r2 = 1.0 / ( 1.0 + 0.639
        * ( ( double ) ( order ) - 4.0 )
        / ( 1.0 + 0.71 * ( ( double ) ( order ) - 4.0 ) ) );

      r3 = 1.0 / ( 1.0 + 20.0 * alpha / ( ( 7.5 + alpha ) *
        ( double ) ( order * order ) ) );

      x0 = x0 + r1 * r2 * r3 * ( x0 - x[i-3] );
    }
    else if ( i == order )
    {
      r1 = ( 1.0 + 0.37 * beta ) / ( 1.67 + 0.28 * beta );

      r2 = 1.0 /
        ( 1.0 + 0.22 * ( ( double ) ( order ) - 8.0 )
        / ( double ) ( order ) );

      r3 = 1.0 / ( 1.0 + 8.0 * alpha /
        ( ( 6.28 + alpha ) * ( double ) ( order * order ) ) );

      x0 = x0 + r1 * r2 * r3 * ( x0 - x[i-3] );
    }

    // Polish the guess with Newton iteration; DP2 and P1 feed the weight.
    webbur::jacobi_ss_root ( &x0, order, alpha, beta, &dp2, &p1, b, c );

    x[i-1] = x0;
    w[i-1] = cc / ( dp2 * p1 );
  }
//
//  Reverse the order of the values, so abscissas come out ascending.
//
  for ( i = 1; i <= order/2; i++ )
  {
    temp       = x[i-1];
    x[i-1]     = x[order-i];
    x[order-i] = temp;
  }

  for ( i = 1; i <=order/2; i++ )
  {
    temp       = w[i-1];
    w[i-1]     = w[order-i];
    w[order-i] = temp;
  }

  delete [] b;
  delete [] c;

  return;
}
+//****************************************************************************80
+
void jacobi_ss_recur ( double *p2, double *dp2, double *p1, double x, int order,
  double alpha, double beta, double b[], double c[] )

//****************************************************************************80
//
//  Purpose:
//
//    JACOBI_SS_RECUR evaluates a Jacobi polynomial.
//
//  Discussion:
//
//    The three-term recurrence
//
//      J(K)(X) = ( X - B(K) ) * J(K-1)(X) - C(K) * J(K-2)(X)
//
//    is applied, seeded with J(0)(X) = 1 and
//    J(1)(X) = X + ( ALPHA - BETA ) / ( ALPHA + BETA + 2 ),
//    and the derivative is carried along by differentiating the recurrence.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    18 February 2008
//
//  Author:
//
//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
//    C++ version by John Burkardt.
//
//  Reference:
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//  Parameters:
//
//    Output, double *P2, the value of J(ORDER)(X).
//
//    Output, double *DP2, the value of J'(ORDER)(X).
//
//    Output, double *P1, the value of J(ORDER-1)(X).
//
//    Input, double X, the point at which polynomials are evaluated.
//
//    Input, int ORDER, the order of the polynomial.
//
//    Input, double ALPHA, BETA, the exponents of (1-X) and
//    (1+X) in the quadrature rule.
//
//    Input, double B[ORDER], C[ORDER], the recursion coefficients.
//
{
  int k;
  double older;                       //  J(K-2)(X)
  double dolder;                      //  J'(K-2)(X)
  double dp1 = 0.0;                   //  J'(K-1)(X); derivative of J(0) is 0.
//
//  Seed the recurrence with the degree 0 and degree 1 polynomials.
//
  *p1 = 1.0;
  *p2 = x + ( alpha - beta ) / ( alpha + beta + 2.0 );
  *dp2 = 1.0;

  for ( k = 2; k <= order; k++ )
  {
    older = *p1;
    dolder = dp1;

    *p1 = *p2;
    dp1 = *dp2;

    *p2 = ( x - b[k-1] ) * ( *p1 ) - c[k-1] * older;
//
//  Product rule applied to the recurrence above.
//
    *dp2 = ( x - b[k-1] ) * dp1 + ( *p1 ) - c[k-1] * dolder;
  }
}
+//****************************************************************************80
+
+void jacobi_ss_root ( double *x, int order, double alpha, double beta,
+  double *dp2, double *p1, double b[], double c[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    JACOBI_SS_ROOT improves an approximate root of a Jacobi polynomial.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 February 2008
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input/output, double *X, the approximate root, which
+//    should be improved on output.
+//
+//    Input, int ORDER, the order of the polynomial.
+//
+//    Input, double ALPHA, BETA, the exponents of (1-X) and
+//    (1+X) in the quadrature rule.
+//
+//    Output, double *DP2, the value of J'(ORDER)(X).
+//
+//    Output, double *P1, the value of J(ORDER-1)(X).
+//
+//    Input, double B[ORDER], C[ORDER], the recursion coefficients.
+//
+{
+  double d;
+  double eps;
+  double p2;
+  int step;
+  int step_max = 10;
+
+  eps = webbur::r8_epsilon ( );
+
+  for ( step = 1; step <= step_max; step++ )
+  {
+    webbur::jacobi_ss_recur ( &p2, dp2, p1, *x, order, alpha, beta, b, c );
+
+    d = p2 / ( *dp2 );
+    *x = *x - d;
+
+    if ( webbur::r8_abs ( d ) <= eps * ( webbur::r8_abs ( *x ) + 1.0 ) )
+    {
+      return;
+    }
+  }
+  return;
+}
+//****************************************************************************80
+
+void laguerre_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_COMPUTE: Laguerre quadrature rule by the Elhay-Kautsky method.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Sylvan Elhay, Jaroslav Kautsky.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Sylvan Elhay, Jaroslav Kautsky,
+//    Algorithm 655: IQPACK, FORTRAN Subroutines for the Weights of
+//    Interpolatory Quadrature,
+//    ACM Transactions on Mathematical Software,
+//    Volume 13, Number 4, December 1987, pages 399-415.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double *bj;
+  int i;
+  double zemu;
+//
+//  Define the zero-th moment.
+//
+  zemu = 1.0;
+//
+//  Define the Jacobi matrix.
+//
+  bj = new double[n];
+
+  for ( i = 0; i < n; i++ )
+  {
+    bj[i] = ( double ) ( i + 1 );
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    x[i] = ( double ) ( 2 * i + 1 );
+  }
+
+  w[0] = std::sqrt ( zemu );
+
+  for ( i = 1; i < n; i++ )
+  {
+    w[i] = 0.0;
+  }
+//
+//  Diagonalize the Jacobi matrix.
+//
+  webbur::imtqlx ( n, x, bj, w );
+
+  for ( i = 0; i < n; i++ )
+  {
+    w[i] = w[i] * w[i];
+  }
+
+  delete [] bj;
+
+  return;
+}
+//****************************************************************************80
+
+void laguerre_compute_np ( int order, int np, double p[], double x[],
+  double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_COMPUTE_NP computes a Laguerre quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( 0 <= X < +oo ) exp ( - X ) * F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
+//
+//    The integral:
+//
+//      Integral ( A <= X < +oo ) F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= ORDER ) W(I) * exp ( X(I) ) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  webbur::laguerre_compute ( order, x, w );
+
+  return;
+}
+//****************************************************************************80
+
+void laguerre_compute_points ( int order, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_COMPUTE_POINTS computes Laguerre quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  double *w;
+
+  w = new double[order];
+
+  webbur::laguerre_compute ( order, x, w );
+
+  delete [] w;
+
+  return;
+}
+//****************************************************************************80
+
+void laguerre_compute_points_np ( int order, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_COMPUTE_POINTS_NP computes Laguerre quadrature points.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+{
+  webbur::laguerre_compute_points ( order, x );
+
+  return;
+}
+//****************************************************************************80
+
+void laguerre_compute_weights ( int order, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_COMPUTE_WEIGHTS computes Laguerre quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double *x;
+
+  x = new double[order];
+
+  webbur::laguerre_compute ( order, x, w );
+
+  delete [] x;
+
+  return;
+}
+//****************************************************************************80
+
+void laguerre_compute_weights_np ( int order, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_COMPUTE_WEIGHTS_NP computes Laguerre quadrature weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  webbur::laguerre_compute_weights ( order, w );
+
+  return;
+}
+//****************************************************************************80
+
+double laguerre_integral ( int expon )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_INTEGRAL evaluates a monomial Laguerre integral.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( 0 <= x < +oo ) x^n * exp ( -x ) dx
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 February 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int EXPON, the exponent.
+//    0 <= EXPON.
+//
+//    Output, double EXACT, the value of the integral.
+//
+{
+  double exact;
+
+  exact = webbur::r8_factorial ( expon );
+
+  return exact;
+}
+//****************************************************************************80
+
+void laguerre_lookup ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_LOOKUP looks up abscissas and weights for Laguerre quadrature.
+//
+//  Discussion:
+//
+//    The abscissas are the zeroes of the Laguerre polynomial L(N)(X).
+//
+//    The integral:
+//
+//      Integral ( 0 <= X < +oo ) exp ( -X ) * F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * f ( X(I) )
+//
+//    The integral:
+//
+//      Integral ( 0 <= X < +oo ) F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * exp ( X(I) ) * f ( X(I) )
+//
+//    Mathematica can numerically estimate the abscissas for the
+//    n-th order polynomial to p digits of precision by the command:
+//
+//      NSolve [ LaguerreL[n,x] == 0, x, p ]
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    27 April 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Milton Abramowitz, Irene Stegun,
+//    Handbook of Mathematical Functions,
+//    National Bureau of Standards, 1964,
+//    ISBN: 0-486-61272-4,
+//    LC: QA47.A34.
+//
+//    Vladimir Krylov,
+//    Approximate Calculation of Integrals,
+//    Dover, 2006,
+//    ISBN: 0486445798,
+//    LC: QA311.K713.
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//    Stephen Wolfram,
+//    The Mathematica Book,
+//    Fourth Edition,
+//    Cambridge University Press, 1999,
+//    ISBN: 0-521-64314-7,
+//    LC: QA76.95.W65.
+//
+//    Daniel Zwillinger, editor,
+//    CRC Standard Mathematical Tables and Formulae,
+//    30th Edition,
+//    CRC Press, 1996,
+//    ISBN: 0-8493-2479-3.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be between 1 and 20.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::laguerre_lookup_points ( n, x );
+
+  webbur::laguerre_lookup_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
void laguerre_lookup_points ( int n, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    LAGUERRE_LOOKUP_POINTS looks up abscissas for Laguerre quadrature.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    27 April 2010
//
//  Author:
//
//    John Burkardt
//
//  Reference:
//
//    Milton Abramowitz, Irene Stegun,
//    Handbook of Mathematical Functions,
//    National Bureau of Standards, 1964,
//    ISBN: 0-486-61272-4,
//    LC: QA47.A34.
//
//    Vladimir Krylov,
//    Approximate Calculation of Integrals,
//    Dover, 2006,
//    ISBN: 0486445798,
//    LC: QA311.K713.
//
//    Arthur Stroud, Don Secrest,
//    Gaussian Quadrature Formulas,
//    Prentice Hall, 1966,
//    LC: QA299.4G3S7.
//
//    Stephen Wolfram,
//    The Mathematica Book,
//    Fourth Edition,
//    Cambridge University Press, 1999,
//    ISBN: 0-521-64314-7,
//    LC: QA76.95.W65.
//
//    Daniel Zwillinger, editor,
//    CRC Standard Mathematical Tables and Formulae,
//    30th Edition,
//    CRC Press, 1996,
//    ISBN: 0-8493-2479-3.
//
//  Parameters:
//
//    Input, int N, the order.
//    N must be between 1 and 20.
//
//    Output, double X[N], the abscissas.
//
{
//
//  Hard-coded tables of abscissas, one branch per supported order N.
//  The values are presumably the zeros of L(N)(X) as tabulated in the
//  references above -- confirm against Abramowitz & Stegun Table 25.9.
//
  if ( n == 1 )
  {
    x[0] =  1.00000000000000000000000000000E+00;
  }
  else if ( n == 2 )
  {
    x[0] = 0.585786437626904951198311275790E+00;
    x[1] = 3.41421356237309504880168872421E+00;
  }
  else if ( n == 3 )
  {
    x[0] = 0.415774556783479083311533873128E+00;
    x[1] = 2.29428036027904171982205036136E+00;
    x[2] = 6.28994508293747919686641576551E+00;
  }
  else if ( n == 4 )
  {
    x[0] = 0.322547689619392311800361459104E+00;
    x[1] = 1.74576110115834657568681671252E+00;
    x[2] = 4.53662029692112798327928538496E+00;
    x[3] = 9.39507091230113312923353644342E+00;
  }
  else if ( n == 5 )
  {
    x[0] = 0.263560319718140910203061943361E+00;
    x[1] = 1.41340305910651679221840798019E+00;
    x[2] = 3.59642577104072208122318658878E+00;
    x[3] = 7.08581000585883755692212418111E+00;
    x[4] = 12.6408008442757826594332193066E+00;
  }
  else if ( n == 6 )
  {
    x[0] = 0.222846604179260689464354826787E+00;
    x[1] = 1.18893210167262303074315092194E+00;
    x[2] = 2.99273632605931407769132528451E+00;
    x[3] = 5.77514356910451050183983036943E+00;
    x[4] = 9.83746741838258991771554702994E+00;
    x[5] = 15.9828739806017017825457915674E+00;
  }
  else if ( n == 7 )
  {
    x[0] = 0.193043676560362413838247885004E+00;
    x[1] = 1.02666489533919195034519944317E+00;
    x[2] = 2.56787674495074620690778622666E+00;
    x[3] = 4.90035308452648456810171437810E+00;
    x[4] = 8.18215344456286079108182755123E+00;
    x[5] = 12.7341802917978137580126424582E+00;
    x[6] = 19.3957278622625403117125820576E+00;
  }
  else if ( n == 8 )
  {
    x[0] = 0.170279632305100999788861856608E+00;
    x[1] = 0.903701776799379912186020223555E+00;
    x[2] = 2.25108662986613068930711836697E+00;
    x[3] = 4.26670017028765879364942182690E+00;
    x[4] = 7.04590540239346569727932548212E+00;
    x[5] = 10.7585160101809952240599567880E+00;
    x[6] = 15.7406786412780045780287611584E+00;
    x[7] = 22.8631317368892641057005342974E+00;
  }
  else if ( n == 9 )
  {
    x[0] = 0.152322227731808247428107073127E+00;
    x[1] = 0.807220022742255847741419210952E+00;
    x[2] = 2.00513515561934712298303324701E+00;
    x[3] = 3.78347397333123299167540609364E+00;
    x[4] = 6.20495677787661260697353521006E+00;
    x[5] = 9.37298525168757620180971073215E+00;
    x[6] = 13.4662369110920935710978818397E+00;
    x[7] = 18.8335977889916966141498992996E+00;
    x[8] = 26.3740718909273767961410072937E+00;
  }
  else if ( n == 10 )
  {
    x[0] = 0.137793470540492430830772505653E+00;
    x[1] = 0.729454549503170498160373121676E+00;
    x[2] = 1.80834290174031604823292007575E+00;
    x[3] = 3.40143369785489951448253222141E+00;
    x[4] = 5.55249614006380363241755848687E+00;
    x[5] = 8.33015274676449670023876719727E+00;
    x[6] = 11.8437858379000655649185389191E+00;
    x[7] = 16.2792578313781020995326539358E+00;
    x[8] = 21.9965858119807619512770901956E+00;
    x[9] = 29.9206970122738915599087933408E+00;
  }
  else if ( n == 11 )
  {
    x[0] = 0.125796442187967522675794577516E+00;
    x[1] = 0.665418255839227841678127839420E+00;
    x[2] = 1.64715054587216930958700321365E+00;
    x[3] = 3.09113814303525495330195934259E+00;
    x[4] = 5.02928440157983321236999508366E+00;
    x[5] = 7.50988786380661681941099714450E+00;
    x[6] = 10.6059509995469677805559216457E+00;
    x[7] = 14.4316137580641855353200450349E+00;
    x[8] = 19.1788574032146786478174853989E+00;
    x[9] = 25.2177093396775611040909447797E+00;
    x[10] = 33.4971928471755372731917259395E+00;
  }
  else if ( n == 12 )
  {
    x[0] = 0.115722117358020675267196428240E+00;
    x[1] = 0.611757484515130665391630053042E+00;
    x[2] = 1.51261026977641878678173792687E+00;
    x[3] = 2.83375133774350722862747177657E+00;
    x[4] = 4.59922763941834848460572922485E+00;
    x[5] = 6.84452545311517734775433041849E+00;
    x[6] = 9.62131684245686704391238234923E+00;
    x[7] = 13.0060549933063477203460524294E+00;
    x[8] = 17.1168551874622557281840528008E+00;
    x[9] = 22.1510903793970056699218950837E+00;
    x[10] = 28.4879672509840003125686072325E+00;
    x[11] = 37.0991210444669203366389142764E+00;
  }
  else if ( n == 13 )
  {
    x[0] = 0.107142388472252310648493376977E+00;
    x[1] = 0.566131899040401853406036347177E+00;
    x[2] = 1.39856433645101971792750259921E+00;
    x[3] = 2.61659710840641129808364008472E+00;
    x[4] = 4.23884592901703327937303389926E+00;
    x[5] = 6.29225627114007378039376523025E+00;
    x[6] = 8.81500194118697804733348868036E+00;
    x[7] = 11.8614035888112425762212021880E+00;
    x[8] = 15.5107620377037527818478532958E+00;
    x[9] = 19.8846356638802283332036594634E+00;
    x[10] = 25.1852638646777580842970297823E+00;
    x[11] = 31.8003863019472683713663283526E+00;
    x[12] = 40.7230086692655795658979667001E+00;
  }
  else if ( n == 14 )
  {
    x[0] = 0.0997475070325975745736829452514E+00;
    x[1] = 0.526857648851902896404583451502E+00;
    x[2] = 1.30062912125149648170842022116E+00;
    x[3] = 2.43080107873084463616999751038E+00;
    x[4] = 3.93210282229321888213134366778E+00;
    x[5] = 5.82553621830170841933899983898E+00;
    x[6] = 8.14024014156514503005978046052E+00;
    x[7] = 10.9164995073660188408130510904E+00;
    x[8] = 14.2108050111612886831059780825E+00;
    x[9] = 18.1048922202180984125546272083E+00;
    x[10] = 22.7233816282696248232280886985E+00;
    x[11] = 28.2729817232482056954158923218E+00;
    x[12] = 35.1494436605924265828643121364E+00;
    x[13] = 44.3660817111174230416312423666E+00;
  }
  else if ( n == 15 )
  {
    x[0] = 0.0933078120172818047629030383672E+00;
    x[1] = 0.492691740301883908960101791412E+00;
    x[2] = 1.21559541207094946372992716488E+00;
    x[3] = 2.26994952620374320247421741375E+00;
    x[4] = 3.66762272175143727724905959436E+00;
    x[5] = 5.42533662741355316534358132596E+00;
    x[6] = 7.56591622661306786049739555812E+00;
    x[7] = 10.1202285680191127347927394568E+00;
    x[8] = 13.1302824821757235640991204176E+00;
    x[9] = 16.6544077083299578225202408430E+00;
    x[10] = 20.7764788994487667729157175676E+00;
    x[11] = 25.6238942267287801445868285977E+00;
    x[12] = 31.4075191697539385152432196202E+00;
    x[13] = 38.5306833064860094162515167595E+00;
    x[14] = 48.0260855726857943465734308508E+00;
  }
  else if ( n == 16 )
  {
    x[0] = 0.0876494104789278403601980973401E+00;
    x[1] = 0.462696328915080831880838260664E+00;
    x[2] = 1.14105777483122685687794501811E+00;
    x[3] = 2.12928364509838061632615907066E+00;
    x[4] = 3.43708663389320664523510701675E+00;
    x[5] = 5.07801861454976791292305830814E+00;
    x[6] = 7.07033853504823413039598947080E+00;
    x[7] = 9.43831433639193878394724672911E+00;
    x[8] = 12.2142233688661587369391246088E+00;
    x[9] = 15.4415273687816170767647741622E+00;
    x[10] = 19.1801568567531348546631409497E+00;
    x[11] = 23.5159056939919085318231872752E+00;
    x[12] = 28.5787297428821403675206137099E+00;
    x[13] = 34.5833987022866258145276871778E+00;
    x[14] = 41.9404526476883326354722330252E+00;
    x[15] = 51.7011603395433183643426971197E+00;
  }
  else if ( n == 17 )
  {
    x[0] = 0.0826382147089476690543986151980E+00;
    x[1] = 0.436150323558710436375959029847E+00;
    x[2] = 1.07517657751142857732980316755E+00;
    x[3] = 2.00519353164923224070293371933E+00;
    x[4] = 3.23425612404744376157380120696E+00;
    x[5] = 4.77351351370019726480932076262E+00;
    x[6] = 6.63782920536495266541643929703E+00;
    x[7] = 8.84668551116980005369470571184E+00;
    x[8] = 11.4255293193733525869726151469E+00;
    x[9] = 14.4078230374813180021982874959E+00;
    x[10] = 17.8382847307011409290658752412E+00;
    x[11] = 21.7782682577222653261749080522E+00;
    x[12] = 26.3153178112487997766149598369E+00;
    x[13] = 31.5817716804567331343908517497E+00;
    x[14] = 37.7960938374771007286092846663E+00;
    x[15] = 45.3757165339889661829258363215E+00;
    x[16] = 55.3897517898396106640900199790E+00;
  }
  else if ( n == 18 )
  {
    x[0] = 0.0781691666697054712986747615334E+00;
    x[1] = 0.412490085259129291039101536536E+00;
    x[2] = 1.01652017962353968919093686187E+00;
    x[3] = 1.89488850996976091426727831954E+00;
    x[4] = 3.05435311320265975115241130719E+00;
    x[5] = 4.50420553888989282633795571455E+00;
    x[6] = 6.25672507394911145274209116326E+00;
    x[7] = 8.32782515660563002170470261564E+00;
    x[8] = 10.7379900477576093352179033397E+00;
    x[9] = 13.5136562075550898190863812108E+00;
    x[10] = 16.6893062819301059378183984163E+00;
    x[11] = 20.3107676262677428561313764553E+00;
    x[12] = 24.4406813592837027656442257980E+00;
    x[13] = 29.1682086625796161312980677805E+00;
    x[14] = 34.6279270656601721454012429438E+00;
    x[15] = 41.0418167728087581392948614284E+00;
    x[16] = 48.8339227160865227486586093290E+00;
    x[17] = 59.0905464359012507037157810181E+00;
  }
  else if ( n == 19 )
  {
    x[0] = 0.0741587837572050877131369916024E+00;
    x[1] = 0.391268613319994607337648350299E+00;
    x[2] = 0.963957343997958058624878377130E+00;
    x[3] = 1.79617558206832812557725825252E+00;
    x[4] = 2.89365138187378399116494713237E+00;
    x[5] = 4.26421553962776647436040018167E+00;
    x[6] = 5.91814156164404855815360191408E+00;
    x[7] = 7.86861891533473373105668358176E+00;
    x[8] = 10.1324237168152659251627415800E+00;
    x[9] = 12.7308814638423980045092979656E+00;
    x[10] = 15.6912783398358885454136069861E+00;
    x[11] = 19.0489932098235501532136429732E+00;
    x[12] = 22.8508497608294829323930586693E+00;
    x[13] = 27.1606693274114488789963947149E+00;
    x[14] = 32.0691222518622423224362865906E+00;
    x[15] = 37.7129058012196494770647508283E+00;
    x[16] = 44.3173627958314961196067736013E+00;
    x[17] = 52.3129024574043831658644222420E+00;
    x[18] = 62.8024231535003758413504690673E+00;
  }
  else if ( n == 20 )
  {
    x[0] = 0.0705398896919887533666890045842E+00;
    x[1] = 0.372126818001611443794241388761E+00;
    x[2] = 0.916582102483273564667716277074E+00;
    x[3] = 1.70730653102834388068768966741E+00;
    x[4] = 2.74919925530943212964503046049E+00;
    x[5] = 4.04892531385088692237495336913E+00;
    x[6] = 5.61517497086161651410453988565E+00;
    x[7] = 7.45901745367106330976886021837E+00;
    x[8] = 9.59439286958109677247367273428E+00;
    x[9] = 12.0388025469643163096234092989E+00;
    x[10] = 14.8142934426307399785126797100E+00;
    x[11] = 17.9488955205193760173657909926E+00;
    x[12] = 21.4787882402850109757351703696E+00;
    x[13] = 25.4517027931869055035186774846E+00;
    x[14] = 29.9325546317006120067136561352E+00;
    x[15] = 35.0134342404790000062849359067E+00;
    x[16] = 40.8330570567285710620295677078E+00;
    x[17] = 47.6199940473465021399416271529E+00;
    x[18] = 55.8107957500638988907507734445E+00;
    x[19] = 66.5244165256157538186403187915E+00;
  }
  else
  {
//
//  Unsupported order: report and abort.
//
    std::cerr << "\n";
    std::cerr << "LAGUERRE_LOOKUP_POINTS - Fatal error!\n";
    std::cerr << "  Illegal value of N = " << n << "\n";
    std::cerr << "  Legal values are 1 through 20.\n";
    std::exit ( 1 );
  }

  return;
}
+//****************************************************************************80
+
+void laguerre_lookup_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_LOOKUP_WEIGHTS looks up weights for Laguerre quadrature.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    27 April 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Milton Abramowitz, Irene Stegun,
+//    Handbook of Mathematical Functions,
+//    National Bureau of Standards, 1964,
+//    ISBN: 0-486-61272-4,
+//    LC: QA47.A34.
+//
+//    Vladimir Krylov,
+//    Approximate Calculation of Integrals,
+//    Dover, 2006,
+//    ISBN: 0486445798,
+//    LC: QA311.K713.
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//    Stephen Wolfram,
+//    The Mathematica Book,
+//    Fourth Edition,
+//    Cambridge University Press, 1999,
+//    ISBN: 0-521-64314-7,
+//    LC: QA76.95.W65.
+//
+//    Daniel Zwillinger, editor,
+//    CRC Standard Mathematical Tables and Formulae,
+//    30th Edition,
+//    CRC Press, 1996,
+//    ISBN: 0-8493-2479-3.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be between 1 and 20.
+//
+//    Output, double W[N], the weights.
+//
+{
+  if ( n == 1 )
+  {
+    w[0] =  1.00000000000000000000000000000E+00;
+  }
+  else if ( n == 2 )
+  {
+    w[0] = 0.85355339059327376220042218105E+00;
+    w[1] = 0.146446609406726237799577818948E+00;
+  }
+  else if ( n == 3 )
+  {
+    w[0] = 0.71109300992917301544959019114E+00;
+    w[1] = 0.27851773356924084880144488846E+00;
+    w[2] = 0.010389256501586135748964920401E+00;
+  }
+  else if ( n == 4 )
+  {
+    w[0] = 0.60315410434163360163596602382E+00;
+    w[1] = 0.35741869243779968664149201746E+00;
+    w[2] = 0.03888790851500538427243816816E+00;
+    w[3] = 0.0005392947055613274501037905676E+00;
+  }
+  else if ( n == 5 )
+  {
+    w[0] = 0.52175561058280865247586092879E+00;
+    w[1] = 0.3986668110831759274541333481E+00;
+    w[2] = 0.0759424496817075953876533114E+00;
+    w[3] = 0.00361175867992204845446126257E+00;
+    w[4] = 0.00002336997238577622789114908455E+00;
+  }
+  else if ( n == 6 )
+  {
+    w[0] = 0.45896467394996359356828487771E+00;
+    w[1] = 0.4170008307721209941133775662E+00;
+    w[2] = 0.1133733820740449757387061851E+00;
+    w[3] = 0.01039919745314907489891330285E+00;
+    w[4] = 0.000261017202814932059479242860E+00;
+    w[5] = 8.98547906429621238825292053E-07;
+  }
+  else if ( n == 7 )
+  {
+    w[0] = 0.40931895170127390213043288002E+00;
+    w[1] = 0.4218312778617197799292810054E+00;
+    w[2] = 0.1471263486575052783953741846E+00;
+    w[3] = 0.0206335144687169398657056150E+00;
+    w[4] = 0.00107401014328074552213195963E+00;
+    w[5] = 0.0000158654643485642012687326223E+00;
+    w[6] = 3.17031547899558056227132215E-08;
+  }
+  else if ( n == 8 )
+  {
+    w[0] = 0.36918858934163752992058283938E+00;
+    w[1] = 0.4187867808143429560769785813E+00;
+    w[2] = 0.175794986637171805699659867E+00;
+    w[3] = 0.033343492261215651522132535E+00;
+    w[4] = 0.0027945362352256725249389241E+00;
+    w[5] = 0.00009076508773358213104238501E+00;
+    w[6] = 8.4857467162725315448680183E-07;
+    w[7] = 1.04800117487151038161508854E-09;
+  }
+  else if ( n == 9 )
+  {
+    w[0] = 0.336126421797962519673467717606E+00;
+    w[1] = 0.411213980423984387309146942793E+00;
+    w[2] = 0.199287525370885580860575607212E+00;
+    w[3] = 0.0474605627656515992621163600479E+00;
+    w[4] = 0.00559962661079458317700419900556E+00;
+    w[5] = 0.000305249767093210566305412824291E+00;
+    w[6] = 6.59212302607535239225572284875E-06;
+    w[7] = 4.1107693303495484429024104033E-08;
+    w[8] = 3.29087403035070757646681380323E-11;
+  }
+  else if ( n == 10 )
+  {
+    w[0] = 0.30844111576502014154747083468E+00;
+    w[1] = 0.4011199291552735515157803099E+00;
+    w[2] = 0.218068287611809421588648523E+00;
+    w[3] = 0.062087456098677747392902129E+00;
+    w[4] = 0.009501516975181100553839072E+00;
+    w[5] = 0.0007530083885875387754559644E+00;
+    w[6] = 0.00002825923349599565567422564E+00;
+    w[7] = 4.249313984962686372586577E-07;
+    w[8] = 1.839564823979630780921535E-09;
+    w[9] = 9.911827219609008558377547E-13;
+  }
+  else if ( n == 11 )
+  {
+    w[0] = 0.28493321289420060505605102472E+00;
+    w[1] = 0.3897208895278493779375535080E+00;
+    w[2] = 0.232781831848991333940223796E+00;
+    w[3] = 0.076564453546196686400854179E+00;
+    w[4] = 0.014393282767350695091863919E+00;
+    w[5] = 0.001518880846484873069847776E+00;
+    w[6] = 0.0000851312243547192259720424E+00;
+    w[7] = 2.29240387957450407857683E-06;
+    w[8] = 2.48635370276779587373391E-08;
+    w[9] = 7.71262693369132047028153E-11;
+    w[10] = 2.883775868323623861597778E-14;
+  }
+  else if ( n == 12 )
+  {
+    w[0] = 0.26473137105544319034973889206E+00;
+    w[1] = 0.3777592758731379820244905567E+00;
+    w[2] = 0.244082011319877564254870818E+00;
+    w[3] = 0.09044922221168093072750549E+00;
+    w[4] = 0.02010238115463409652266129E+00;
+    w[5] = 0.002663973541865315881054158E+00;
+    w[6] = 0.000203231592662999392121433E+00;
+    w[7] = 8.3650558568197987453363E-06;
+    w[8] = 1.66849387654091026116990E-07;
+    w[9] = 1.34239103051500414552392E-09;
+    w[10] = 3.06160163503502078142408E-12;
+    w[11] = 8.148077467426241682473119E-16;
+  }
+  else if ( n == 13 )
+  {
+    w[0] = 0.24718870842996262134624918596E+00;
+    w[1] = 0.3656888229005219453067175309E+00;
+    w[2] = 0.252562420057658502356824289E+00;
+    w[3] = 0.10347075802418370511421863E+00;
+    w[4] = 0.02643275441556161577815877E+00;
+    w[5] = 0.00422039604025475276555209E+00;
+    w[6] = 0.000411881770472734774892473E+00;
+    w[7] = 0.0000235154739815532386882897E+00;
+    w[8] = 7.3173116202490991040105E-07;
+    w[9] = 1.10884162570398067979151E-08;
+    w[10] = 6.7708266922058988406462E-11;
+    w[11] = 1.15997995990507606094507E-13;
+    w[12] = 2.245093203892758415991872E-17;
+  }
+  else if ( n == 14 )
+  {
+    w[0] = 0.23181557714486497784077486110E+00;
+    w[1] = 0.3537846915975431518023313013E+00;
+    w[2] = 0.258734610245428085987320561E+00;
+    w[3] = 0.11548289355692321008730499E+00;
+    w[4] = 0.03319209215933736003874996E+00;
+    w[5] = 0.00619286943700661021678786E+00;
+    w[6] = 0.00073989037786738594242589E+00;
+    w[7] = 0.000054907194668416983785733E+00;
+    w[8] = 2.4095857640853774967578E-06;
+    w[9] = 5.801543981676495180886E-08;
+    w[10] = 6.819314692484974119616E-10;
+    w[11] = 3.2212077518948479398089E-12;
+    w[12] = 4.2213524405165873515980E-15;
+    w[13] = 6.05237502228918880839871E-19;
+  }
+  else if ( n == 15 )
+  {
+    w[0] = 0.21823488594008688985641323645E+00;
+    w[1] = 0.3422101779228833296389489568E+00;
+    w[2] = 0.263027577941680097414812275E+00;
+    w[3] = 0.12642581810593053584303055E+00;
+    w[4] = 0.04020686492100091484158548E+00;
+    w[5] = 0.00856387780361183836391576E+00;
+    w[6] = 0.00121243614721425207621921E+00;
+    w[7] = 0.00011167439234425194199258E+00;
+    w[8] = 6.459926762022900924653E-06;
+    w[9] = 2.226316907096272630332E-07;
+    w[10] = 4.227430384979365007351E-09;
+    w[11] = 3.921897267041089290385E-11;
+    w[12] = 1.4565152640731264063327E-13;
+    w[13] = 1.4830270511133013354616E-16;
+    w[14] = 1.60059490621113323104998E-20;
+  }
+  else if ( n == 16 )
+  {
+    w[0] = 0.20615171495780099433427363674E+00;
+    w[1] = 0.3310578549508841659929830987E+00;
+    w[2] = 0.265795777644214152599502021E+00;
+    w[3] = 0.13629693429637753997554751E+00;
+    w[4] = 0.0473289286941252189780623E+00;
+    w[5] = 0.0112999000803394532312490E+00;
+    w[6] = 0.0018490709435263108642918E+00;
+    w[7] = 0.00020427191530827846012602E+00;
+    w[8] = 0.00001484458687398129877135E+00;
+    w[9] = 6.828319330871199564396E-07;
+    w[10] = 1.881024841079673213882E-08;
+    w[11] = 2.862350242973881619631E-10;
+    w[12] = 2.127079033224102967390E-12;
+    w[13] = 6.297967002517867787174E-15;
+    w[14] = 5.050473700035512820402E-18;
+    w[15] = 4.1614623703728551904265E-22;
+  }
+  else if ( n == 17 )
+  {
+    w[0] = 0.19533220525177083214592729770E+00;
+    w[1] = 0.3203753572745402813366256320E+00;
+    w[2] = 0.267329726357171097238809604E+00;
+    w[3] = 0.14512985435875862540742645E+00;
+    w[4] = 0.0544369432453384577793806E+00;
+    w[5] = 0.0143572977660618672917767E+00;
+    w[6] = 0.0026628247355727725684324E+00;
+    w[7] = 0.0003436797271562999206118E+00;
+    w[8] = 0.00003027551783782870109437E+00;
+    w[9] = 1.768515053231676895381E-06;
+    w[10] = 6.57627288681043332199E-08;
+    w[11] = 1.469730932159546790344E-09;
+    w[12] = 1.81691036255544979555E-11;
+    w[13] = 1.095401388928687402976E-13;
+    w[14] = 2.617373882223370421551E-16;
+    w[15] = 1.6729356931461546908502E-19;
+    w[16] = 1.06562631627404278815253E-23;
+  }
+  else if ( n == 18 )
+  {
+    w[0] = 0.18558860314691880562333775228E+00;
+    w[1] = 0.3101817663702252936495975957E+00;
+    w[2] = 0.267866567148536354820854395E+00;
+    w[3] = 0.15297974746807490655384308E+00;
+    w[4] = 0.0614349178609616527076780E+00;
+    w[5] = 0.0176872130807729312772600E+00;
+    w[6] = 0.0036601797677599177980266E+00;
+    w[7] = 0.0005406227870077353231284E+00;
+    w[8] = 0.0000561696505121423113818E+00;
+    w[9] = 4.01530788370115755859E-06;
+    w[10] = 1.91466985667567497969E-07;
+    w[11] = 5.8360952686315941292E-09;
+    w[12] = 1.07171126695539012773E-10;
+    w[13] = 1.08909871388883385562E-12;
+    w[14] = 5.38666474837830887608E-15;
+    w[15] = 1.049865978035703408779E-17;
+    w[16] = 5.405398451631053643566E-21;
+    w[17] = 2.6916532692010286270838E-25;
+  }
+  else if ( n == 19 )
+  {
+    w[0] = 0.17676847491591250225103547981E+00;
+    w[1] = 0.3004781436072543794821568077E+00;
+    w[2] = 0.267599547038175030772695441E+00;
+    w[3] = 0.15991337213558021678551215E+00;
+    w[4] = 0.0682493799761491134552355E+00;
+    w[5] = 0.0212393076065443249244062E+00;
+    w[6] = 0.0048416273511483959672501E+00;
+    w[7] = 0.0008049127473813667665946E+00;
+    w[8] = 0.0000965247209315350170843E+00;
+    w[9] = 8.20730525805103054409E-06;
+    w[10] = 4.8305667247307725394E-07;
+    w[11] = 1.90499136112328569994E-08;
+    w[12] = 4.8166846309280615577E-10;
+    w[13] = 7.3482588395511443768E-12;
+    w[14] = 6.2022753875726163989E-14;
+    w[15] = 2.54143084301542272372E-16;
+    w[16] = 4.07886129682571235007E-19;
+    w[17] = 1.707750187593837061004E-22;
+    w[18] = 6.715064649908189959990E-27;
+  }
+  else if ( n == 20 )
+  {
+    w[0] = 0.168746801851113862149223899689E+00;
+    w[1] = 0.291254362006068281716795323812E+00;
+    w[2] = 0.266686102867001288549520868998E+00;
+    w[3] = 0.166002453269506840031469127816E+00;
+    w[4] = 0.0748260646687923705400624639615E+00;
+    w[5] = 0.0249644173092832210728227383234E+00;
+    w[6] = 0.00620255084457223684744754785395E+00;
+    w[7] = 0.00114496238647690824203955356969E+00;
+    w[8] = 0.000155741773027811974779809513214E+00;
+    w[9] = 0.0000154014408652249156893806714048E+00;
+    w[10] = 1.08648636651798235147970004439E-06;
+    w[11] = 5.33012090955671475092780244305E-08;
+    w[12] = 1.7579811790505820035778763784E-09;
+    w[13] = 3.72550240251232087262924585338E-11;
+    w[14] = 4.76752925157819052449488071613E-13;
+    w[15] = 3.37284424336243841236506064991E-15;
+    w[16] = 1.15501433950039883096396247181E-17;
+    w[17] = 1.53952214058234355346383319667E-20;
+    w[18] = 5.28644272556915782880273587683E-24;
+    w[19] = 1.65645661249902329590781908529E-28;
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "LAGUERRE_LOOKUP_WEIGHTS - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::cerr << "  Legal values are 1 through 20.\n";
+    std::exit ( 1 );
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void laguerre_ss_compute ( int order, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_SS_COMPUTE computes a Laguerre quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( 0 <= X < +oo ) exp ( - X ) * F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= ORDER ) W(I) * F ( X(I) )
+//
+//    The integral:
+//
+//        Integral ( A <= X < +oo ) F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= ORDER ) W(I) * exp ( X(I) ) * F ( X(I) )
+//
+//    Abscissas are located one at a time by Newton iteration
+//    (LAGUERRE_SS_ROOT), starting from an empirical estimate built from
+//    the previously found roots.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    1 <= ORDER.
+//
+//    Output, double X[ORDER], the abscissas.
+//
+//    Output, double W[ORDER], the weights.
+//
+{
+  double *b;
+  double *c;
+  double dp2;
+  int i;
+  int j;
+  double p1;
+  double r1;
+  double x0;
+
+  if ( order < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "LAGUERRE_SS_COMPUTE - Fatal error!\n";
+    std::cerr << "  Illegal value of ORDER = " << order << "\n";
+    std::exit ( 1 );
+  }
+
+  b = new double[order];
+  c = new double[order];
+//
+//  Set the recursion coefficients for the monic Laguerre polynomials:
+//  B[I] = 2*I+1, C[I] = I*I.
+//
+  for ( i = 0; i < order; i++ )
+  {
+    b[i] = ( double ) ( 2 * i + 1 );
+  }
+
+  for ( i = 0; i < order; i++ )
+  {
+    c[i] = ( double ) ( i * i );
+  }
+
+  for ( i = 0; i < order; i++ )
+  {
+//
+//  Compute an estimate for the root.
+//
+    if ( i == 0 )
+    {
+      x0 =  3.0 / ( 1.0 + 2.4 * ( double ) ( order ) );
+    }
+    else if ( i == 1 )
+    {
+      x0 = x0 + 15.0 / ( 1.0 + 2.5 * ( double ) ( order ) );
+    }
+    else
+    {
+      r1 = ( 1.0 + 2.55 * ( double ) ( i - 1 ) )
+        / ( 1.9 * ( double ) ( i - 1 ) );
+
+      x0 = x0 + r1 * ( x0 - x[i-2] );
+    }
+//
+//  Use iteration to find the root.
+//
+    webbur::laguerre_ss_root ( &x0, order, &dp2, &p1, b, c );
+//
+//  Set the abscissa and weight.
+//
+    x[i] = x0;
+//
+//  The weight is ( ((ORDER-1)!)^2 / DP2 ) / P1.  The factorial products
+//  are interleaved with the divisions to postpone overflow; even so, the
+//  calculation breaks down for ORDER = 127.
+//
+    w[i] = ( 1.0 / dp2 );
+    for ( j = 2; j <= order; j++ )
+    {
+      w[i] = w[i] * ( double ) ( j - 1 );
+    }
+    w[i] = w[i] / p1;
+    for ( j = 2; j <= order; j++ )
+    {
+      w[i] = w[i] * ( double ) ( j - 1 );
+    }
+  }
+
+  delete [] b;
+  delete [] c;
+
+  return;
+}
+//****************************************************************************80
+
+void laguerre_ss_recur ( double *p2, double *dp2, double *p1, double x,
+  int order, double b[], double c[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_SS_RECUR evaluates a Laguerre polynomial.
+//
+//  Discussion:
+//
+//    The polynomial and its derivative are advanced together through the
+//    three-term recurrence
+//
+//      P(K)(X) = ( X - B[K-1] ) * P(K-1)(X) - C[K-1] * P(K-2)(X),
+//
+//    with B and C supplied by the caller (see LAGUERRE_SS_COMPUTE).
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Output, double *P2, the value of L(ORDER)(X).
+//
+//    Output, double *DP2, the value of L'(ORDER)(X).
+//
+//    Output, double *P1, the value of L(ORDER-1)(X).
+//
+//    Input, double X, the point at which polynomials are evaluated.
+//
+//    Input, int ORDER, the order of the polynomial.
+//
+//    Input, double B[ORDER], C[ORDER], the recursion coefficients.
+//
+{
+  double dp0;
+  double dp1;
+  int i;
+  double p0;
+
+//
+//  Seed the recurrence: L(0)(X) = 1, L(1)(X) = X - 1 (monic form),
+//  with derivatives 0 and 1 respectively.
+//
+  *p1 = 1.0;
+  dp1 = 0.0;
+
+  *p2 = x - 1.0;
+  *dp2 = 1.0;
+
+  for ( i = 1; i < order; i++ )
+  {
+//
+//  Shift the window P0 <- P1 <- P2 before forming the next term.
+//  The statement order matters: *p1 and *p2 are overwritten in place.
+//
+    p0 = *p1;
+    dp0 = dp1;
+
+    *p1 = *p2;
+    dp1 = *dp2;
+
+    *p2  = ( x - b[i] ) * ( *p1 ) - c[i] * p0;
+    *dp2 = ( x - b[i] ) * dp1 + ( *p1 ) - c[i] * dp0;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void laguerre_ss_root ( double *x, int order, double *dp2, double *p1,
+  double b[], double c[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LAGUERRE_SS_ROOT improves a root of a Laguerre polynomial.
+//
+//  Discussion:
+//
+//    At most ten Newton steps are applied to the caller's approximation;
+//    the iteration exits early once the correction falls to the order of
+//    machine precision relative to the current estimate.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Arthur Stroud, Don Secrest.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input/output, double *X, the approximate root, which
+//    should be improved on output.
+//
+//    Input, int ORDER, the order of the polynomial.
+//
+//    Output, double *DP2, the value of L'(ORDER)(X).
+//
+//    Output, double *P1, the value of L(ORDER-1)(X).
+//
+//    Input, double B[ORDER], C[ORDER], the recursion coefficients.
+//
+{
+  double delta;
+  double eps;
+  int iter;
+  const int iter_max = 10;
+  double p2;
+
+  eps = webbur::r8_epsilon ( );
+
+  for ( iter = 1; iter <= iter_max; iter++ )
+  {
+//
+//  Evaluate L(ORDER) and its derivative at the current estimate.
+//
+    webbur::laguerre_ss_recur ( &p2, dp2, p1, *x, order, b, c );
+//
+//  Apply one Newton correction.
+//
+    delta = p2 / ( *dp2 );
+    *x = *x - delta;
+
+    if ( webbur::r8_abs ( delta ) <= eps * ( webbur::r8_abs ( *x ) + 1.0 ) )
+    {
+      break;
+    }
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void legendre_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_COMPUTE: Legendre quadrature rule by the Elhay-Kautsky method.
+//
+//  Discussion:
+//
+//    The rule is obtained as the eigensystem of the symmetric tridiagonal
+//    Jacobi matrix of the Legendre recurrence: its eigenvalues are the
+//    abscissas, and the squared first components of the eigenvectors,
+//    scaled by the zero-th moment, are the weights.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 April 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Sylvan Elhay, Jaroslav Kautsky.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Sylvan Elhay, Jaroslav Kautsky,
+//    Algorithm 655: IQPACK, FORTRAN Subroutines for the Weights of
+//    Interpolatory Quadrature,
+//    ACM Transactions on Mathematical Software,
+//    Volume 13, Number 4, December 1987, pages 399-415.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double *bj;
+  int i;
+  double zemu;
+//
+//  Zero-th moment of the Legendre weight function over [-1,+1].
+//
+  zemu = 2.0;
+//
+//  Subdiagonal of the Jacobi matrix: sqrt( k^2 / ( 4 k^2 - 1 ) ), k = i+1.
+//
+  bj = new double[n];
+
+  for ( i = 0; i < n; i++ )
+  {
+    bj[i] = std::sqrt ( ( double ) ( ( i + 1 ) * ( i + 1 ) )
+                      / ( double ) ( 4 * ( i + 1 ) * ( i + 1 ) - 1 ) );
+  }
+//
+//  The diagonal (X) is zero; seed W with sqrt(zemu) in its first slot.
+//
+  for ( i = 0; i < n; i++ )
+  {
+    x[i] = 0.0;
+    w[i] = 0.0;
+  }
+  w[0] = std::sqrt ( zemu );
+//
+//  Diagonalize the Jacobi matrix; then square the eigenvector components
+//  to obtain the weights.
+//
+  webbur::imtqlx ( n, x, bj, w );
+
+  for ( i = 0; i < n; i++ )
+  {
+    w[i] = w[i] * w[i];
+  }
+
+  delete [] bj;
+
+  return;
+}
+//****************************************************************************80
+
+void legendre_compute_np ( int n, int np, double p[], double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_COMPUTE_NP computes a Legendre quadrature rule.
+//
+//  Discussion:
+//
+//    This is a thin wrapper around LEGENDRE_COMPUTE which accepts, and
+//    ignores, a parameter array, so that Legendre rules can be requested
+//    through the same parameterized interface used by other rule families.
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Philip Davis, Philip Rabinowitz.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Input, int NP, the number of parameters (ignored).
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::legendre_compute ( n, x, w );
+
+  return;
+}
+//****************************************************************************80
+
+void legendre_compute_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_COMPUTE_POINTS computes Legendre quadrature points.
+//
+//  Discussion:
+//
+//    LEGENDRE_COMPUTE produces points and weights together, so the
+//    weights are computed into a scratch array and discarded.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  double *w_scratch;
+
+  w_scratch = new double[n];
+
+  webbur::legendre_compute ( n, x, w_scratch );
+
+  delete [] w_scratch;
+
+  return;
+}
+//****************************************************************************80
+
+void legendre_compute_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_COMPUTE_POINTS_NP computes Legendre quadrature points.
+//
+//  Discussion:
+//
+//    This is a thin wrapper around LEGENDRE_COMPUTE_POINTS which accepts,
+//    and ignores, a parameter array.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters (ignored).
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  webbur::legendre_compute_points ( n, x );
+
+  return;
+}
+//****************************************************************************80
+
+void legendre_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_COMPUTE_WEIGHTS computes Legendre quadrature weights.
+//
+//  Discussion:
+//
+//    LEGENDRE_COMPUTE produces points and weights together, so the
+//    abscissas are computed into a scratch array and discarded.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double *x_scratch;
+
+  x_scratch = new double[n];
+
+  webbur::legendre_compute ( n, x_scratch, w );
+
+  delete [] x_scratch;
+
+  return;
+}
+//****************************************************************************80
+
+void legendre_compute_weights_np ( int n, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_COMPUTE_WEIGHTS_NP computes Legendre quadrature weights.
+//
+//  Discussion:
+//
+//    This is a thin wrapper around LEGENDRE_COMPUTE_WEIGHTS which accepts,
+//    and ignores, a parameter array.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 June 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Input, int NP, the number of parameters (ignored).
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::legendre_compute_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
+void legendre_dr_compute ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_DR_COMPUTE computes a Legendre quadrature rule.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      Integral ( -1 <= X <= 1 ) F(X) dX
+//
+//    The quadrature rule:
+//
+//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) )
+//
+//    Only the (N+1)/2 nonnegative abscissas are computed directly, each
+//    by a cosine-based initial estimate followed by a Newton correction;
+//    the remaining abscissas and weights are then filled in by symmetry.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 June 2009
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Philip Davis, Philip Rabinowitz.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    1 <= N.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  double d1;
+  double d2pn;
+  double d3pn;
+  double d4pn;
+  double dp;
+  double dpn;
+  double e1;
+  double fx;
+  double h;
+  int i;
+  int iback;
+  int k;
+  int m;
+  int mp1mi;
+  int ncopy;
+  int nmove;
+  double p;
+  double pi = 3.141592653589793;
+  double pk;
+  double pkm1;
+  double pkp1;
+  double t;
+  double u;
+  double v;
+  double x0;
+  double xtemp;
+
+  if ( n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "LEGENDRE_DR_COMPUTE - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+
+//
+//  E1 = N*(N+1) recurs in the higher-derivative formulas below.
+//
+  e1 = ( double ) ( n * ( n + 1 ) );
+
+//
+//  M counts the roots in [0,1]; they are computed from largest to smallest.
+//
+  m = ( n + 1 ) / 2;
+
+  for ( i = 1; i <= m; i++ )
+  {
+    mp1mi = m + 1 - i;
+
+//
+//  Initial estimate of the I-th root: a Chebyshev-style cosine with a
+//  small multiplicative correction.
+//
+    t = ( double ) ( 4 * i - 1 ) * pi / ( double ) ( 4 * n + 2 );
+
+    x0 =  std::cos ( t ) * ( 1.0 - ( 1.0 - 1.0 / ( double ) ( n ) )
+      / ( double ) ( 8 * n * n ) );
+
+//
+//  Three-term recurrence: on exit PK = P(N)(X0), PKM1 = P(N-1)(X0).
+//
+    pkm1 = 1.0;
+    pk = x0;
+
+    for ( k = 2; k <= n; k++ )
+    {
+      pkp1 = 2.0 * x0 * pk - pkm1 - ( x0 * pk - pkm1 ) / ( double ) ( k );
+      pkm1 = pk;
+      pk = pkp1;
+    }
+
+//
+//  First through fourth derivatives of P(N) at X0.
+//
+    d1 = ( double ) ( n ) * ( pkm1 - x0 * pk );
+
+    dpn = d1 / ( 1.0 - x0 * x0 );
+
+    d2pn = ( 2.0 * x0 * dpn - e1 * pk ) / ( 1.0 - x0 * x0 );
+
+    d3pn = ( 4.0 * x0 * d2pn + ( 2.0 - e1 ) * dpn ) / ( 1.0 - x0 * x0 );
+
+    d4pn = ( 6.0 * x0 * d3pn + ( 6.0 - e1 ) * d2pn ) / ( 1.0 - x0 * x0 );
+
+    u = pk / dpn;
+    v = d2pn / dpn;
+//
+//  Initial approximation H:
+//
+    h = -u * ( 1.0 + 0.5 * u * ( v + u * ( v * v - d3pn / ( 3.0 * dpn ) ) ) );
+//
+//  Refine H using one step of Newton's method:
+//
+    p = pk + h * ( dpn + 0.5 * h * ( d2pn + h / 3.0
+      * ( d3pn + 0.25 * h * d4pn ) ) );
+
+    dp = dpn + h * ( d2pn + 0.5 * h * ( d3pn + h * d4pn / 3.0 ) );
+
+    h = h - p / dp;
+
+    xtemp = x0 + h;
+
+    x[mp1mi-1] = xtemp;
+
+//
+//  Weight from the Taylor expansion of P'(N) about the corrected root.
+//
+    fx = d1 - h * e1 * ( pk + 0.5 * h * ( dpn + h / 3.0
+      * ( d2pn + 0.25 * h * ( d3pn + 0.2 * h * d4pn ) ) ) );
+
+    w[mp1mi-1] = 2.0 * ( 1.0 - xtemp * xtemp ) / ( fx * fx );
+  }
+
+//
+//  For odd N, zero is always an abscissa.
+//
+  if ( ( n % 2 ) == 1 )
+  {
+    x[0] = 0.0;
+  }
+//
+//  Shift the data up.
+//
+  nmove = ( n + 1 ) / 2;
+  ncopy = n - nmove;
+
+  for ( i = 1; i <= nmove; i++ )
+  {
+    iback = n + 1 - i;
+    x[iback-1] = x[iback-ncopy-1];
+    w[iback-1] = w[iback-ncopy-1];
+  }
+//
+//  Reflect values for the negative abscissas.
+//
+  for ( i = 1; i <= n - nmove; i++ )
+  {
+    x[i-1] = - x[n-i];
+    w[i-1] = w[n-i];
+  }
+
+  return;
+}
+//****************************************************************************80
+
+double legendre_integral ( int expon )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_INTEGRAL evaluates a monomial Legendre integral.
+//
+//  Discussion:
+//
+//    The integral:
+//
+//      integral ( -1 <= x <= +1 ) x^n dx
+//
+//    Odd powers integrate to zero by symmetry; even powers give
+//    2 / ( n + 1 ).
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 February 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int EXPON, the exponent.
+//
+//    Output, double LEGENDRE_INTEGRAL, the value of the exact integral.
+//
+{
+  double value;
+
+  if ( ( expon % 2 ) != 0 )
+  {
+    value = 0.0;
+  }
+  else
+  {
+    value = 2.0 / ( double ) ( expon + 1 );
+  }
+
+  return value;
+}
+//****************************************************************************80
+
+void legendre_lookup ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_LOOKUP looks up abscissas and weights for Gauss-Legendre quadrature.
+//
+//  Discussion:
+//
+//    This simply delegates to LEGENDRE_LOOKUP_POINTS and
+//    LEGENDRE_LOOKUP_WEIGHTS, which hold tabulated values.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    27 April 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Milton Abramowitz, Irene Stegun,
+//    Handbook of Mathematical Functions,
+//    National Bureau of Standards, 1964,
+//    ISBN: 0-486-61272-4,
+//    LC: QA47.A34.
+//
+//    Vladimir Krylov,
+//    Approximate Calculation of Integrals,
+//    Dover, 2006,
+//    ISBN: 0486445798.
+//    LC: QA311.K713.
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//    Stephen Wolfram,
+//    The Mathematica Book,
+//    Fourth Edition,
+//    Cambridge University Press, 1999,
+//    ISBN: 0-521-64314-7,
+//    LC: QA76.95.W65.
+//
+//    Daniel Zwillinger, editor,
+//    CRC Standard Mathematical Tables and Formulae,
+//    30th Edition,
+//    CRC Press, 1996,
+//    ISBN: 0-8493-2479-3,
+//    LC: QA47.M315.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be between 1 and 33.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  webbur::legendre_lookup_points ( n, x );
+
+  webbur::legendre_lookup_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
+void legendre_lookup_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_LOOKUP_POINTS looks up abscissas for Gauss-Legendre quadrature.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    27 April 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Milton Abramowitz, Irene Stegun,
+//    Handbook of Mathematical Functions,
+//    National Bureau of Standards, 1964,
+//    ISBN: 0-486-61272-4,
+//    LC: QA47.A34.
+//
+//    Vladimir Krylov,
+//    Approximate Calculation of Integrals,
+//    Dover, 2006,
+//    ISBN: 0486445798.
+//    LC: QA311.K713.
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//    Stephen Wolfram,
+//    The Mathematica Book,
+//    Fourth Edition,
+//    Cambridge University Press, 1999,
+//    ISBN: 0-521-64314-7,
+//    LC: QA76.95.W65.
+//
+//    Daniel Zwillinger, editor,
+//    CRC Standard Mathematical Tables and Formulae,
+//    30th Edition,
+//    CRC Press, 1996,
+//    ISBN: 0-8493-2479-3,
+//    LC: QA47.M315.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be between 1 and 33.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  if ( n == 1 )
+  {
+    x[0] = 0.000000000000000000000000000000;
+  }
+  else if ( n == 2 )
+  {
+    x[0] = -0.577350269189625764509148780502;
+    x[1] = 0.577350269189625764509148780502;
+  }
+  else if ( n == 3 )
+  {
+    x[0] = -0.774596669241483377035853079956;
+    x[1] = 0.000000000000000000000000000000;
+    x[2] = 0.774596669241483377035853079956;
+  }
+  else if ( n == 4 )
+  {
+    x[0] = -0.861136311594052575223946488893;
+    x[1] = -0.339981043584856264802665759103;
+    x[2] = 0.339981043584856264802665759103;
+    x[3] = 0.861136311594052575223946488893;
+  }
+  else if ( n == 5 )
+  {
+    x[0] = -0.906179845938663992797626878299;
+    x[1] = -0.538469310105683091036314420700;
+    x[2] = 0.000000000000000000000000000000;
+    x[3] = 0.538469310105683091036314420700;
+    x[4] = 0.906179845938663992797626878299;
+  }
+  else if ( n == 6 )
+  {
+    x[0] = -0.932469514203152027812301554494;
+    x[1] = -0.661209386466264513661399595020;
+    x[2] = -0.238619186083196908630501721681;
+    x[3] = 0.238619186083196908630501721681;
+    x[4] = 0.661209386466264513661399595020;
+    x[5] = 0.932469514203152027812301554494;
+  }
+  else if ( n == 7 )
+  {
+    x[0] = -0.949107912342758524526189684048;
+    x[1] = -0.741531185599394439863864773281;
+    x[2] = -0.405845151377397166906606412077;
+    x[3] = 0.000000000000000000000000000000;
+    x[4] = 0.405845151377397166906606412077;
+    x[5] = 0.741531185599394439863864773281;
+    x[6] = 0.949107912342758524526189684048;
+  }
+  else if ( n == 8 )
+  {
+    x[0] = -0.960289856497536231683560868569;
+    x[1] = -0.796666477413626739591553936476;
+    x[2] = -0.525532409916328985817739049189;
+    x[3] = -0.183434642495649804939476142360;
+    x[4] = 0.183434642495649804939476142360;
+    x[5] = 0.525532409916328985817739049189;
+    x[6] = 0.796666477413626739591553936476;
+    x[7] = 0.960289856497536231683560868569;
+  }
+  else if ( n == 9 )
+  {
+    x[0] = -0.968160239507626089835576203;
+    x[1] = -0.836031107326635794299429788;
+    x[2] = -0.613371432700590397308702039;
+    x[3] = -0.324253423403808929038538015;
+    x[4] = 0.000000000000000000000000000;
+    x[5] = 0.324253423403808929038538015;
+    x[6] = 0.613371432700590397308702039;
+    x[7] = 0.836031107326635794299429788;
+    x[8] = 0.968160239507626089835576203;
+  }
+  else if ( n == 10 )
+  {
+    x[0] = -0.973906528517171720077964012;
+    x[1] = -0.865063366688984510732096688;
+    x[2] = -0.679409568299024406234327365;
+    x[3] = -0.433395394129247190799265943;
+    x[4] = -0.148874338981631210884826001;
+    x[5] = 0.148874338981631210884826001;
+    x[6] = 0.433395394129247190799265943;
+    x[7] = 0.679409568299024406234327365;
+    x[8] = 0.865063366688984510732096688;
+    x[9] = 0.973906528517171720077964012;
+  }
+  else if ( n == 11 )
+  {
+    x[0] = -0.978228658146056992803938001;
+    x[1] = -0.887062599768095299075157769;
+    x[2] = -0.730152005574049324093416252;
+    x[3] = -0.519096129206811815925725669;
+    x[4] = -0.269543155952344972331531985;
+    x[5] = 0.000000000000000000000000000;
+    x[6] = 0.269543155952344972331531985;
+    x[7] = 0.519096129206811815925725669;
+    x[8] = 0.730152005574049324093416252;
+    x[9] = 0.887062599768095299075157769;
+    x[10] = 0.978228658146056992803938001;
+  }
+  else if ( n == 12 )
+  {
+    x[0] = -0.981560634246719250690549090;
+    x[1] = -0.904117256370474856678465866;
+    x[2] = -0.769902674194304687036893833;
+    x[3] = -0.587317954286617447296702419;
+    x[4] = -0.367831498998180193752691537;
+    x[5] = -0.125233408511468915472441369;
+    x[6] = 0.125233408511468915472441369;
+    x[7] = 0.367831498998180193752691537;
+    x[8] = 0.587317954286617447296702419;
+    x[9] = 0.769902674194304687036893833;
+    x[10] = 0.904117256370474856678465866;
+    x[11] = 0.981560634246719250690549090;
+  }
+  else if ( n == 13 )
+  {
+    x[0] = -0.984183054718588149472829449;
+    x[1] = -0.917598399222977965206547837;
+    x[2] = -0.801578090733309912794206490;
+    x[3] = -0.642349339440340220643984607;
+    x[4] = -0.448492751036446852877912852;
+    x[5] = -0.230458315955134794065528121;
+    x[6] = 0.000000000000000000000000000;
+    x[7] = 0.230458315955134794065528121;
+    x[8] = 0.448492751036446852877912852;
+    x[9] = 0.642349339440340220643984607;
+    x[10] = 0.80157809073330991279420649;
+    x[11] = 0.91759839922297796520654784;
+    x[12] = 0.98418305471858814947282945;
+  }
+  else if ( n == 14 )
+  {
+    x[0] = -0.986283808696812338841597267;
+    x[1] = -0.928434883663573517336391139;
+    x[2] = -0.827201315069764993189794743;
+    x[3] = -0.687292904811685470148019803;
+    x[4] = -0.515248636358154091965290719;
+    x[5] = -0.319112368927889760435671824;
+    x[6] = -0.108054948707343662066244650;
+    x[7] = 0.108054948707343662066244650;
+    x[8] = 0.31911236892788976043567182;
+    x[9] = 0.51524863635815409196529072;
+    x[10] = 0.68729290481168547014801980;
+    x[11] = 0.82720131506976499318979474;
+    x[12] = 0.92843488366357351733639114;
+    x[13] = 0.98628380869681233884159727;
+  }
+  else if ( n == 15 )
+  {
+    x[0] = -0.987992518020485428489565719;
+    x[1] = -0.937273392400705904307758948;
+    x[2] = -0.848206583410427216200648321;
+    x[3] = -0.724417731360170047416186055;
+    x[4] = -0.570972172608538847537226737;
+    x[5] = -0.394151347077563369897207371;
+    x[6] = -0.201194093997434522300628303;
+    x[7] = 0.00000000000000000000000000;
+    x[8] = 0.20119409399743452230062830;
+    x[9] = 0.39415134707756336989720737;
+    x[10] = 0.57097217260853884753722674;
+    x[11] = 0.72441773136017004741618605;
+    x[12] = 0.84820658341042721620064832;
+    x[13] = 0.93727339240070590430775895;
+    x[14] = 0.98799251802048542848956572;
+  }
+  else if ( n == 16 )
+  {
+    x[0] = -0.989400934991649932596154173;
+    x[1] = -0.944575023073232576077988416;
+    x[2] = -0.865631202387831743880467898;
+    x[3] = -0.755404408355003033895101195;
+    x[4] = -0.617876244402643748446671764;
+    x[5] = -0.458016777657227386342419443;
+    x[6] = -0.281603550779258913230460501;
+    x[7] = -0.09501250983763744018531934;
+    x[8] = 0.09501250983763744018531934;
+    x[9] = 0.28160355077925891323046050;
+    x[10] = 0.45801677765722738634241944;
+    x[11] = 0.61787624440264374844667176;
+    x[12] = 0.75540440835500303389510119;
+    x[13] = 0.86563120238783174388046790;
+    x[14] = 0.94457502307323257607798842;
+    x[15] = 0.98940093499164993259615417;
+  }
+  else if ( n == 17 )
+  {
+    x[0] = -0.990575475314417335675434020;
+    x[1] = -0.950675521768767761222716958;
+    x[2] = -0.880239153726985902122955694;
+    x[3] = -0.781514003896801406925230056;
+    x[4] = -0.657671159216690765850302217;
+    x[5] = -0.512690537086476967886246569;
+    x[6] = -0.35123176345387631529718552;
+    x[7] = -0.17848418149584785585067749;
+    x[8] = 0.00000000000000000000000000;
+    x[9] = 0.17848418149584785585067749;
+    x[10] = 0.35123176345387631529718552;
+    x[11] = 0.51269053708647696788624657;
+    x[12] = 0.65767115921669076585030222;
+    x[13] = 0.78151400389680140692523006;
+    x[14] = 0.88023915372698590212295569;
+    x[15] = 0.95067552176876776122271696;
+    x[16] = 0.99057547531441733567543402;
+  }
+  else if ( n == 18 )
+  {
+    x[0] = -0.991565168420930946730016005;
+    x[1] = -0.955823949571397755181195893;
+    x[2] = -0.892602466497555739206060591;
+    x[3] = -0.803704958972523115682417455;
+    x[4] = -0.691687043060353207874891081;
+    x[5] = -0.55977083107394753460787155;
+    x[6] = -0.41175116146284264603593179;
+    x[7] = -0.25188622569150550958897285;
+    x[8] = -0.08477501304173530124226185;
+    x[9] = 0.08477501304173530124226185;
+    x[10] = 0.25188622569150550958897285;
+    x[11] = 0.41175116146284264603593179;
+    x[12] = 0.55977083107394753460787155;
+    x[13] = 0.69168704306035320787489108;
+    x[14] = 0.80370495897252311568241746;
+    x[15] = 0.89260246649755573920606059;
+    x[16] = 0.95582394957139775518119589;
+    x[17] = 0.99156516842093094673001600;
+  }
+  else if ( n == 19 )
+  {
+    x[0] = -0.992406843843584403189017670;
+    x[1] = -0.960208152134830030852778841;
+    x[2] = -0.903155903614817901642660929;
+    x[3] = -0.822714656537142824978922487;
+    x[4] = -0.72096617733522937861709586;
+    x[5] = -0.60054530466168102346963816;
+    x[6] = -0.46457074137596094571726715;
+    x[7] = -0.31656409996362983199011733;
+    x[8] = -0.16035864564022537586809612;
+    x[9] = 0.00000000000000000000000000;
+    x[10] = 0.16035864564022537586809612;
+    x[11] = 0.31656409996362983199011733;
+    x[12] = 0.46457074137596094571726715;
+    x[13] = 0.60054530466168102346963816;
+    x[14] = 0.72096617733522937861709586;
+    x[15] = 0.82271465653714282497892249;
+    x[16] = 0.90315590361481790164266093;
+    x[17] = 0.96020815213483003085277884;
+    x[18] = 0.99240684384358440318901767;
+  }
+  else if ( n == 20 )
+  {
+    x[0] = -0.993128599185094924786122388;
+    x[1] = -0.963971927277913791267666131;
+    x[2] = -0.912234428251325905867752441;
+    x[3] = -0.83911697182221882339452906;
+    x[4] = -0.74633190646015079261430507;
+    x[5] = -0.63605368072651502545283670;
+    x[6] = -0.51086700195082709800436405;
+    x[7] = -0.37370608871541956067254818;
+    x[8] = -0.22778585114164507808049620;
+    x[9] = -0.07652652113349733375464041;
+    x[10] = 0.07652652113349733375464041;
+    x[11] = 0.22778585114164507808049620;
+    x[12] = 0.37370608871541956067254818;
+    x[13] = 0.51086700195082709800436405;
+    x[14] = 0.63605368072651502545283670;
+    x[15] = 0.74633190646015079261430507;
+    x[16] = 0.83911697182221882339452906;
+    x[17] = 0.91223442825132590586775244;
+    x[18] = 0.96397192727791379126766613;
+    x[19] = 0.99312859918509492478612239;
+  }
+  else if ( n == 21 )
+  {
+    x[ 0] =  -0.99375217062038950026024204;
+    x[ 1] =  -0.96722683856630629431662221;
+    x[ 2] =  -0.92009933415040082879018713;
+    x[ 3] =  -0.85336336458331728364725064;
+    x[ 4] =  -0.76843996347567790861587785;
+    x[ 5] =  -0.66713880419741231930596667;
+    x[ 6] =  -0.55161883588721980705901880;
+    x[ 7] =  -0.42434212020743878357366889;
+    x[ 8] =  -0.28802131680240109660079252;
+    x[9] =  -0.14556185416089509093703098;
+    x[10] =   0.00000000000000000000000000;
+    x[11] =  +0.14556185416089509093703098;
+    x[12] =  +0.28802131680240109660079252;
+    x[13] =  +0.42434212020743878357366889;
+    x[14] =  +0.55161883588721980705901880;
+    x[15] =  +0.66713880419741231930596667;
+    x[16] =  +0.76843996347567790861587785;
+    x[17] =  +0.85336336458331728364725064;
+    x[18] =  +0.92009933415040082879018713;
+    x[19] =  +0.96722683856630629431662221;
+    x[20] =  +0.99375217062038950026024204;
+  }
+  else if ( n == 22 )
+  {
+    x[0] = -0.99429458548239929207303142;
+    x[1] = -0.97006049783542872712395099;
+    x[2] = -0.92695677218717400052069294;
+    x[3] = -0.86581257772030013653642564;
+    x[4] = -0.78781680597920816200427796;
+    x[5] = -0.69448726318668278005068984;
+    x[6] = -0.58764040350691159295887693;
+    x[7] = -0.46935583798675702640633071;
+    x[8] = -0.34193582089208422515814742;
+    x[9] = -0.20786042668822128547884653;
+    x[10] = -0.06973927331972222121384180;
+    x[11] = 0.06973927331972222121384180;
+    x[12] = 0.20786042668822128547884653;
+    x[13] = 0.34193582089208422515814742;
+    x[14] = 0.46935583798675702640633071;
+    x[15] = 0.58764040350691159295887693;
+    x[16] = 0.69448726318668278005068984;
+    x[17] = 0.78781680597920816200427796;
+    x[18] = 0.86581257772030013653642564;
+    x[19] = 0.92695677218717400052069294;
+    x[20] = 0.97006049783542872712395099;
+    x[21] = 0.99429458548239929207303142;
+  }
+  else if ( n == 23 )
+  {
+    x[0] = -0.99476933499755212352392572;
+    x[1] = -0.97254247121811523195602408;
+    x[2] = -0.93297108682601610234919699;
+    x[3] = -0.87675235827044166737815689;
+    x[4] = -0.80488840161883989215111841;
+    x[5] = -0.71866136313195019446162448;
+    x[6] = -0.61960987576364615638509731;
+    x[7] = -0.50950147784600754968979305;
+    x[8] = -0.39030103803029083142148887;
+    x[9] = -0.26413568097034493053386954;
+    x[10] = -0.13325682429846611093174268;
+    x[11] = 0.00000000000000000000000000;
+    x[12] = 0.13325682429846611093174268;
+    x[13] = 0.26413568097034493053386954;
+    x[14] = 0.39030103803029083142148887;
+    x[15] = 0.50950147784600754968979305;
+    x[16] = 0.61960987576364615638509731;
+    x[17] = 0.71866136313195019446162448;
+    x[18] = 0.80488840161883989215111841;
+    x[19] = 0.87675235827044166737815689;
+    x[20] = 0.93297108682601610234919699;
+    x[21] = 0.97254247121811523195602408;
+    x[22] = 0.99476933499755212352392572;
+  }
+  else if ( n == 24 )
+  {
+    x[0] = -0.99518721999702136017999741;
+    x[1] = -0.97472855597130949819839199;
+    x[2] = -0.93827455200273275852364900;
+    x[3] = -0.88641552700440103421315434;
+    x[4] = -0.82000198597390292195394987;
+    x[5] = -0.74012419157855436424382810;
+    x[6] = -0.64809365193697556925249579;
+    x[7] = -0.54542147138883953565837562;
+    x[8] = -0.43379350762604513848708423;
+    x[9] = -0.31504267969616337438679329;
+    x[10] = -0.19111886747361630915863982;
+    x[11] = -0.06405689286260562608504308;
+    x[12] = 0.06405689286260562608504308;
+    x[13] = 0.19111886747361630915863982;
+    x[14] = 0.31504267969616337438679329;
+    x[15] = 0.43379350762604513848708423;
+    x[16] = 0.54542147138883953565837562;
+    x[17] = 0.64809365193697556925249579;
+    x[18] = 0.74012419157855436424382810;
+    x[19] = 0.82000198597390292195394987;
+    x[20] = 0.88641552700440103421315434;
+    x[21] = 0.93827455200273275852364900;
+    x[22] = 0.97472855597130949819839199;
+    x[23] = 0.99518721999702136017999741;
+  }
+  else if ( n == 25 )
+  {
+    x[0] = -0.99555696979049809790878495;
+    x[1] = -0.97666392145951751149831539;
+    x[2] = -0.94297457122897433941401117;
+    x[3] = -0.89499199787827536885104201;
+    x[4] = -0.83344262876083400142102111;
+    x[5] = -0.75925926303735763057728287;
+    x[6] = -0.67356636847346836448512063;
+    x[7] = -0.57766293024122296772368984;
+    x[8] = -0.47300273144571496052218212;
+    x[9] = -0.36117230580938783773582173;
+    x[10] = -0.24386688372098843204519036;
+    x[11] = -0.12286469261071039638735982;
+    x[12] = 0.00000000000000000000000000;
+    x[13] = 0.12286469261071039638735982;
+    x[14] = 0.24386688372098843204519036;
+    x[15] = 0.36117230580938783773582173;
+    x[16] = 0.47300273144571496052218212;
+    x[17] = 0.57766293024122296772368984;
+    x[18] = 0.67356636847346836448512063;
+    x[19] = 0.75925926303735763057728287;
+    x[20] = 0.83344262876083400142102111;
+    x[21] = 0.89499199787827536885104201;
+    x[22] = 0.94297457122897433941401117;
+    x[23] = 0.97666392145951751149831539;
+    x[24] = 0.99555696979049809790878495;
+  }
+  else if ( n == 26 )
+  {
+    x[0] = -0.99588570114561692900321696;
+    x[1] = -0.97838544595647099110058035;
+    x[2] = -0.94715906666171425013591528;
+    x[3] = -0.90263786198430707421766560;
+    x[4] = -0.84544594278849801879750706;
+    x[5] = -0.77638594882067885619296725;
+    x[6] = -0.69642726041995726486381391;
+    x[7] = -0.60669229301761806323197875;
+    x[8] = -0.50844071482450571769570306;
+    x[9] = -0.40305175512348630648107738;
+    x[10] = -0.29200483948595689514283538;
+    x[11] = -0.17685882035689018396905775;
+    x[12] = -0.05923009342931320709371858;
+    x[13] = 0.05923009342931320709371858;
+    x[14] = 0.17685882035689018396905775;
+    x[15] = 0.29200483948595689514283538;
+    x[16] = 0.40305175512348630648107738;
+    x[17] = 0.50844071482450571769570306;
+    x[18] = 0.60669229301761806323197875;
+    x[19] = 0.69642726041995726486381391;
+    x[20] = 0.77638594882067885619296725;
+    x[21] = 0.84544594278849801879750706;
+    x[22] = 0.90263786198430707421766560;
+    x[23] = 0.94715906666171425013591528;
+    x[24] = 0.97838544595647099110058035;
+    x[25] = 0.99588570114561692900321696;
+  }
+  else if ( n == 27 )
+  {
+    x[0] = -0.99617926288898856693888721;
+    x[1] = -0.97992347596150122285587336;
+    x[2] = -0.95090055781470500685190803;
+    x[3] = -0.90948232067749110430064502;
+    x[4] = -0.85620790801829449030273722;
+    x[5] = -0.79177163907050822714439734;
+    x[6] = -0.71701347373942369929481621;
+    x[7] = -0.63290797194649514092773464;
+    x[8] = -0.54055156457945689490030094;
+    x[9] = -0.44114825175002688058597416;
+    x[10] = -0.33599390363850889973031903;
+    x[11] = -0.22645936543953685885723911;
+    x[12] = -0.11397258560952996693289498;
+    x[13] = 0.00000000000000000000000000;
+    x[14] = 0.11397258560952996693289498;
+    x[15] = 0.22645936543953685885723911;
+    x[16] = 0.33599390363850889973031903;
+    x[17] = 0.44114825175002688058597416;
+    x[18] = 0.54055156457945689490030094;
+    x[19] = 0.63290797194649514092773464;
+    x[20] = 0.71701347373942369929481621;
+    x[21] = 0.79177163907050822714439734;
+    x[22] = 0.85620790801829449030273722;
+    x[23] = 0.90948232067749110430064502;
+    x[24] = 0.95090055781470500685190803;
+    x[25] = 0.97992347596150122285587336;
+    x[26] = 0.99617926288898856693888721;
+  }
+  else if ( n == 28 )
+  {
+    x[0] = -0.99644249757395444995043639;
+    x[1] = -0.98130316537087275369455995;
+    x[2] = -0.95425928062893819725410184;
+    x[3] = -0.91563302639213207386968942;
+    x[4] = -0.86589252257439504894225457;
+    x[5] = -0.80564137091717917144788596;
+    x[6] = -0.73561087801363177202814451;
+    x[7] = -0.65665109403886496121989818;
+    x[8] = -0.56972047181140171930800328;
+    x[9] = -0.47587422495511826103441185;
+    x[10] = -0.37625151608907871022135721;
+    x[11] = -0.27206162763517807767682636;
+    x[12] = -0.16456928213338077128147178;
+    x[13] = -0.05507928988403427042651653;
+    x[14] = 0.05507928988403427042651653;
+    x[15] = 0.16456928213338077128147178;
+    x[16] = 0.27206162763517807767682636;
+    x[17] = 0.37625151608907871022135721;
+    x[18] = 0.47587422495511826103441185;
+    x[19] = 0.56972047181140171930800328;
+    x[20] = 0.65665109403886496121989818;
+    x[21] = 0.73561087801363177202814451;
+    x[22] = 0.80564137091717917144788596;
+    x[23] = 0.86589252257439504894225457;
+    x[24] = 0.91563302639213207386968942;
+    x[25] = 0.95425928062893819725410184;
+    x[26] = 0.98130316537087275369455995;
+    x[27] = 0.99644249757395444995043639;
+  }
+  else if ( n == 29 )
+  {
+    x[0] = -0.99667944226059658616319153;
+    x[1] = -0.98254550526141317487092602;
+    x[2] = -0.95728559577808772579820804;
+    x[3] = -0.92118023295305878509375344;
+    x[4] = -0.87463780492010279041779342;
+    x[5] = -0.81818548761525244498957221;
+    x[6] = -0.75246285173447713391261008;
+    x[7] = -0.67821453760268651515618501;
+    x[8] = -0.59628179713822782037958621;
+    x[9] = -0.50759295512422764210262792;
+    x[10] = -0.41315288817400866389070659;
+    x[11] = -0.31403163786763993494819592;
+    x[12] = -0.21135228616600107450637573;
+    x[13] = -0.10627823013267923017098239;
+    x[14] = 0.00000000000000000000000000;
+    x[15] = 0.10627823013267923017098239;
+    x[16] = 0.21135228616600107450637573;
+    x[17] = 0.31403163786763993494819592;
+    x[18] = 0.41315288817400866389070659;
+    x[19] = 0.50759295512422764210262792;
+    x[20] = 0.59628179713822782037958621;
+    x[21] = 0.67821453760268651515618501;
+    x[22] = 0.75246285173447713391261008;
+    x[23] = 0.81818548761525244498957221;
+    x[24] = 0.87463780492010279041779342;
+    x[25] = 0.92118023295305878509375344;
+    x[26] = 0.95728559577808772579820804;
+    x[27] = 0.98254550526141317487092602;
+    x[28] = 0.99667944226059658616319153;
+  }
+  else if ( n == 30 )
+  {
+    x[0] = -0.99689348407464954027163005;
+    x[1] = -0.98366812327974720997003258;
+    x[2] = -0.96002186496830751221687103;
+    x[3] = -0.92620004742927432587932428;
+    x[4] = -0.88256053579205268154311646;
+    x[5] = -0.82956576238276839744289812;
+    x[6] = -0.76777743210482619491797734;
+    x[7] = -0.69785049479331579693229239;
+    x[8] = -0.62052618298924286114047756;
+    x[9] = -0.53662414814201989926416979;
+    x[10] = -0.44703376953808917678060990;
+    x[11] = -0.35270472553087811347103721;
+    x[12] = -0.25463692616788984643980513;
+    x[13] = -0.15386991360858354696379467;
+    x[14] = -0.05147184255531769583302521;
+    x[15] = 0.05147184255531769583302521;
+    x[16] = 0.15386991360858354696379467;
+    x[17] = 0.25463692616788984643980513;
+    x[18] = 0.35270472553087811347103721;
+    x[19] = 0.44703376953808917678060990;
+    x[20] = 0.53662414814201989926416979;
+    x[21] = 0.62052618298924286114047756;
+    x[22] = 0.69785049479331579693229239;
+    x[23] = 0.76777743210482619491797734;
+    x[24] = 0.82956576238276839744289812;
+    x[25] = 0.88256053579205268154311646;
+    x[26] = 0.92620004742927432587932428;
+    x[27] = 0.96002186496830751221687103;
+    x[28] = 0.98366812327974720997003258;
+    x[29] = 0.99689348407464954027163005;
+  }
+  else if ( n == 31 )
+  {
+    x[0] = -0.99708748181947707405562655;
+    x[1] = -0.98468590966515248400246517;
+    x[2] = -0.96250392509294966178905240;
+    x[3] = -0.93075699789664816495694576;
+    x[4] = -0.88976002994827104337419201;
+    x[5] = -0.83992032014626734008690454;
+    x[6] = -0.78173314841662494040636002;
+    x[7] = -0.71577678458685328390597087;
+    x[8] = -0.64270672292426034618441820;
+    x[9] = -0.56324916140714926272094492;
+    x[10] = -0.47819378204490248044059404;
+    x[11] = -0.38838590160823294306135146;
+    x[12] = -0.29471806998170161661790390;
+    x[13] = -0.19812119933557062877241300;
+    x[14] = -0.09955531215234152032517479;
+    x[15] = 0.00000000000000000000000000;
+    x[16] = 0.09955531215234152032517479;
+    x[17] = 0.19812119933557062877241300;
+    x[18] = 0.29471806998170161661790390;
+    x[19] = 0.38838590160823294306135146;
+    x[20] = 0.47819378204490248044059404;
+    x[21] = 0.56324916140714926272094492;
+    x[22] = 0.64270672292426034618441820;
+    x[23] = 0.71577678458685328390597087;
+    x[24] = 0.78173314841662494040636002;
+    x[25] = 0.83992032014626734008690454;
+    x[26] = 0.88976002994827104337419201;
+    x[27] = 0.93075699789664816495694576;
+    x[28] = 0.96250392509294966178905240;
+    x[29] = 0.98468590966515248400246517;
+    x[30] = 0.99708748181947707405562655;
+  }
+  else if ( n == 32 )
+  {
+    x[0] = -0.99726386184948156354498113;
+    x[1] = -0.98561151154526833540017504;
+    x[2] = -0.96476225558750643077381193;
+    x[3] = -0.93490607593773968917091913;
+    x[4] = -0.89632115576605212396530724;
+    x[5] = -0.84936761373256997013369300;
+    x[6] = -0.79448379596794240696309730;
+    x[7] = -0.73218211874028968038742667;
+    x[8] = -0.66304426693021520097511517;
+    x[9] = -0.58771575724076232904074548;
+    x[10] = -0.50689990893222939002374747;
+    x[11] = -0.42135127613063534536411944;
+    x[12] = -0.33186860228212764977991681;
+    x[13] = -0.23928736225213707454460321;
+    x[14] = -0.14447196158279649348518637;
+    x[15] = -0.04830766568773831623481257;
+    x[16] = 0.04830766568773831623481257;
+    x[17] = 0.14447196158279649348518637;
+    x[18] = 0.23928736225213707454460321;
+    x[19] = 0.33186860228212764977991681;
+    x[20] = 0.42135127613063534536411944;
+    x[21] = 0.50689990893222939002374747;
+    x[22] = 0.58771575724076232904074548;
+    x[23] = 0.66304426693021520097511517;
+    x[24] = 0.73218211874028968038742667;
+    x[25] = 0.79448379596794240696309730;
+    x[26] = 0.84936761373256997013369300;
+    x[27] = 0.89632115576605212396530724;
+    x[28] = 0.93490607593773968917091913;
+    x[29] = 0.96476225558750643077381193;
+    x[30] = 0.98561151154526833540017504;
+    x[31] = 0.99726386184948156354498113;
+  }
+  else if ( n == 33 )
+  {
+    x[0] = -0.99742469424645521726616802;
+    x[1] = -0.98645572623064248811037570;
+    x[2] = -0.96682290968999276892837771;
+    x[3] = -0.93869437261116835035583512;
+    x[4] = -0.90231676774343358304053133;
+    x[5] = -0.85800965267650406464306148;
+    x[6] = -0.80616235627416658979620087;
+    x[7] = -0.74723049644956215785905512;
+    x[8] = -0.68173195996974278626821595;
+    x[9] = -0.61024234583637902730728751;
+    x[10] = -0.53338990478634764354889426;
+    x[11] = -0.45185001727245069572599328;
+    x[12] = -0.36633925774807334107022062;
+    x[13] = -0.27760909715249702940324807;
+    x[14] = -0.18643929882799157233579876;
+    x[15] = -0.09363106585473338567074292;
+    x[16] = 0.00000000000000000000000000;
+    x[17] = 0.09363106585473338567074292;
+    x[18] = 0.18643929882799157233579876;
+    x[19] = 0.27760909715249702940324807;
+    x[20] = 0.36633925774807334107022062;
+    x[21] = 0.45185001727245069572599328;
+    x[22] = 0.53338990478634764354889426;
+    x[23] = 0.61024234583637902730728751;
+    x[24] = 0.68173195996974278626821595;
+    x[25] = 0.74723049644956215785905512;
+    x[26] = 0.80616235627416658979620087;
+    x[27] = 0.85800965267650406464306148;
+    x[28] = 0.90231676774343358304053133;
+    x[29] = 0.93869437261116835035583512;
+    x[30] = 0.96682290968999276892837771;
+    x[31] = 0.98645572623064248811037570;
+    x[32] = 0.99742469424645521726616802;
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "LEGENDRE_LOOKUP_POINTS - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::cerr << "  Legal values are 1 through 33.\n";
+    std::exit ( 1 );
+  }
+  return;
+}
+//****************************************************************************80
+
+void legendre_lookup_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_LOOKUP_WEIGHTS looks up weights for Gauss-Legendre quadrature.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    27 April 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Milton Abramowitz, Irene Stegun,
+//    Handbook of Mathematical Functions,
+//    National Bureau of Standards, 1964,
+//    ISBN: 0-486-61272-4,
+//    LC: QA47.A34.
+//
+//    Vladimir Krylov,
+//    Approximate Calculation of Integrals,
+//    Dover, 2006,
+//    ISBN: 0486445798.
+//    LC: QA311.K713.
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//    Stephen Wolfram,
+//    The Mathematica Book,
+//    Fourth Edition,
+//    Cambridge University Press, 1999,
+//    ISBN: 0-521-64314-7,
+//    LC: QA76.95.W65.
+//
+//    Daniel Zwillinger, editor,
+//    CRC Standard Mathematical Tables and Formulae,
+//    30th Edition,
+//    CRC Press, 1996,
+//    ISBN: 0-8493-2479-3,
+//    LC: QA47.M315.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    N must be between 1 and 33.
+//
+//    Output, double W[N], the weights.
+//
+{
+  if ( n == 1 )
+  {
+    w[0] = 2.000000000000000000000000000000;
+  }
+  else if ( n == 2 )
+  {
+    w[0] = 1.000000000000000000000000000000;
+    w[1] = 1.000000000000000000000000000000;
+  }
+  else if ( n == 3 )
+  {
+    w[0] = 0.555555555555555555555555555556;
+    w[1] = 0.888888888888888888888888888889;
+    w[2] = 0.555555555555555555555555555556;
+  }
+  else if ( n == 4 )
+  {
+    w[0] = 0.347854845137453857373063949222;
+    w[1] = 0.652145154862546142626936050778;
+    w[2] = 0.652145154862546142626936050778;
+    w[3] = 0.347854845137453857373063949222;
+  }
+  else if ( n == 5 )
+  {
+    w[0] = 0.236926885056189087514264040720;
+    w[1] = 0.478628670499366468041291514836;
+    w[2] = 0.568888888888888888888888888889;
+    w[3] = 0.478628670499366468041291514836;
+    w[4] = 0.236926885056189087514264040720;
+  }
+  else if ( n == 6 )
+  {
+    w[0] = 0.171324492379170345040296142173;
+    w[1] = 0.360761573048138607569833513838;
+    w[2] = 0.467913934572691047389870343990;
+    w[3] = 0.467913934572691047389870343990;
+    w[4] = 0.360761573048138607569833513838;
+    w[5] = 0.171324492379170345040296142173;
+  }
+  else if ( n == 7 )
+  {
+    w[0] = 0.129484966168869693270611432679;
+    w[1] = 0.279705391489276667901467771424;
+    w[2] = 0.381830050505118944950369775489;
+    w[3] = 0.417959183673469387755102040816;
+    w[4] = 0.381830050505118944950369775489;
+    w[5] = 0.279705391489276667901467771424;
+    w[6] = 0.129484966168869693270611432679;
+  }
+  else if ( n == 8 )
+  {
+    w[0] = 0.101228536290376259152531354310;
+    w[1] = 0.222381034453374470544355994426;
+    w[2] = 0.313706645877887287337962201987;
+    w[3] = 0.362683783378361982965150449277;
+    w[4] = 0.362683783378361982965150449277;
+    w[5] = 0.313706645877887287337962201987;
+    w[6] = 0.222381034453374470544355994426;
+    w[7] = 0.101228536290376259152531354310;
+  }
+  else if ( n == 9 )
+  {
+    w[0] = 0.081274388361574411971892158111;
+    w[1] = 0.18064816069485740405847203124;
+    w[2] = 0.26061069640293546231874286942;
+    w[3] = 0.31234707704000284006863040658;
+    w[4] = 0.33023935500125976316452506929;
+    w[5] = 0.31234707704000284006863040658;
+    w[6] = 0.26061069640293546231874286942;
+    w[7] = 0.18064816069485740405847203124;
+    w[8] = 0.081274388361574411971892158111;
+  }
+  else if ( n == 10 )
+  {
+    w[0] = 0.066671344308688137593568809893;
+    w[1] = 0.14945134915058059314577633966;
+    w[2] = 0.21908636251598204399553493423;
+    w[3] = 0.26926671930999635509122692157;
+    w[4] = 0.29552422471475287017389299465;
+    w[5] = 0.29552422471475287017389299465;
+    w[6] = 0.26926671930999635509122692157;
+    w[7] = 0.21908636251598204399553493423;
+    w[8] = 0.14945134915058059314577633966;
+    w[9] = 0.066671344308688137593568809893;
+  }
+  else if ( n == 11 )
+  {
+    w[0] = 0.055668567116173666482753720443;
+    w[1] = 0.12558036946490462463469429922;
+    w[2] = 0.18629021092773425142609764143;
+    w[3] = 0.23319376459199047991852370484;
+    w[4] = 0.26280454451024666218068886989;
+    w[5] = 0.27292508677790063071448352834;
+    w[6] = 0.26280454451024666218068886989;
+    w[7] = 0.23319376459199047991852370484;
+    w[8] = 0.18629021092773425142609764143;
+    w[9] = 0.12558036946490462463469429922;
+    w[10] = 0.055668567116173666482753720443;
+  }
+  else if ( n == 12 )
+  {
+    w[0] = 0.047175336386511827194615961485;
+    w[1] = 0.10693932599531843096025471819;
+    w[2] = 0.16007832854334622633465252954;
+    w[3] = 0.20316742672306592174906445581;
+    w[4] = 0.23349253653835480876084989892;
+    w[5] = 0.24914704581340278500056243604;
+    w[6] = 0.24914704581340278500056243604;
+    w[7] = 0.23349253653835480876084989892;
+    w[8] = 0.20316742672306592174906445581;
+    w[9] = 0.16007832854334622633465252954;
+    w[10] = 0.10693932599531843096025471819;
+    w[11] = 0.047175336386511827194615961485;
+  }
+  else if ( n == 13 )
+  {
+    w[0] = 0.040484004765315879520021592201;
+    w[1] = 0.092121499837728447914421775954;
+    w[2] = 0.13887351021978723846360177687;
+    w[3] = 0.17814598076194573828004669200;
+    w[4] = 0.20781604753688850231252321931;
+    w[5] = 0.22628318026289723841209018604;
+    w[6] = 0.23255155323087391019458951527;
+    w[7] = 0.22628318026289723841209018604;
+    w[8] = 0.20781604753688850231252321931;
+    w[9] = 0.17814598076194573828004669200;
+    w[10] = 0.13887351021978723846360177687;
+    w[11] = 0.092121499837728447914421775954;
+    w[12] = 0.040484004765315879520021592201;
+  }
+  else if ( n == 14 )
+  {
+    w[0] = 0.035119460331751863031832876138;
+    w[1] = 0.08015808715976020980563327706;
+    w[2] = 0.12151857068790318468941480907;
+    w[3] = 0.15720316715819353456960193862;
+    w[4] = 0.18553839747793781374171659013;
+    w[5] = 0.20519846372129560396592406566;
+    w[6] = 0.21526385346315779019587644332;
+    w[7] = 0.21526385346315779019587644332;
+    w[8] = 0.20519846372129560396592406566;
+    w[9] = 0.18553839747793781374171659013;
+    w[10] = 0.15720316715819353456960193862;
+    w[11] = 0.12151857068790318468941480907;
+    w[12] = 0.08015808715976020980563327706;
+    w[13] = 0.035119460331751863031832876138;
+  }
+  else if ( n == 15 )
+  {
+    w[0] = 0.030753241996117268354628393577;
+    w[1] = 0.070366047488108124709267416451;
+    w[2] = 0.107159220467171935011869546686;
+    w[3] = 0.13957067792615431444780479451;
+    w[4] = 0.16626920581699393355320086048;
+    w[5] = 0.18616100001556221102680056187;
+    w[6] = 0.19843148532711157645611832644;
+    w[7] = 0.20257824192556127288062019997;
+    w[8] = 0.19843148532711157645611832644;
+    w[9] = 0.18616100001556221102680056187;
+    w[10] = 0.16626920581699393355320086048;
+    w[11] = 0.13957067792615431444780479451;
+    w[12] = 0.107159220467171935011869546686;
+    w[13] = 0.070366047488108124709267416451;
+    w[14] = 0.030753241996117268354628393577;
+  }
+  else if ( n == 16 )
+  {
+    w[0] = 0.027152459411754094851780572456;
+    w[1] = 0.062253523938647892862843836994;
+    w[2] = 0.09515851168249278480992510760;
+    w[3] = 0.12462897125553387205247628219;
+    w[4] = 0.14959598881657673208150173055;
+    w[5] = 0.16915651939500253818931207903;
+    w[6] = 0.18260341504492358886676366797;
+    w[7] = 0.18945061045506849628539672321;
+    w[8] = 0.18945061045506849628539672321;
+    w[9] = 0.18260341504492358886676366797;
+    w[10] = 0.16915651939500253818931207903;
+    w[11] = 0.14959598881657673208150173055;
+    w[12] = 0.12462897125553387205247628219;
+    w[13] = 0.09515851168249278480992510760;
+    w[14] = 0.062253523938647892862843836994;
+    w[15] = 0.027152459411754094851780572456;
+  }
+  else if ( n == 17 )
+  {
+    w[0] = 0.024148302868547931960110026288;
+    w[1] = 0.055459529373987201129440165359;
+    w[2] = 0.085036148317179180883535370191;
+    w[3] = 0.111883847193403971094788385626;
+    w[4] = 0.13513636846852547328631998170;
+    w[5] = 0.15404576107681028808143159480;
+    w[6] = 0.16800410215645004450997066379;
+    w[7] = 0.17656270536699264632527099011;
+    w[8] = 0.17944647035620652545826564426;
+    w[9] = 0.17656270536699264632527099011;
+    w[10] = 0.16800410215645004450997066379;
+    w[11] = 0.15404576107681028808143159480;
+    w[12] = 0.13513636846852547328631998170;
+    w[13] = 0.111883847193403971094788385626;
+    w[14] = 0.085036148317179180883535370191;
+    w[15] = 0.055459529373987201129440165359;
+    w[16] = 0.024148302868547931960110026288;
+  }
+  else if ( n == 18 )
+  {
+    w[0] = 0.021616013526483310313342710266;
+    w[1] = 0.049714548894969796453334946203;
+    w[2] = 0.07642573025488905652912967762;
+    w[3] = 0.10094204410628716556281398492;
+    w[4] = 0.12255520671147846018451912680;
+    w[5] = 0.14064291467065065120473130375;
+    w[6] = 0.15468467512626524492541800384;
+    w[7] = 0.16427648374583272298605377647;
+    w[8] = 0.16914238296314359184065647013;
+    w[9] = 0.16914238296314359184065647013;
+    w[10] = 0.16427648374583272298605377647;
+    w[11] = 0.15468467512626524492541800384;
+    w[12] = 0.14064291467065065120473130375;
+    w[13] = 0.12255520671147846018451912680;
+    w[14] = 0.10094204410628716556281398492;
+    w[15] = 0.07642573025488905652912967762;
+    w[16] = 0.049714548894969796453334946203;
+    w[17] = 0.021616013526483310313342710266;
+  }
+  else if ( n == 19 )
+  {
+    w[0] = 0.019461788229726477036312041464;
+    w[1] = 0.044814226765699600332838157402;
+    w[2] = 0.069044542737641226580708258006;
+    w[3] = 0.091490021622449999464462094124;
+    w[4] = 0.111566645547333994716023901682;
+    w[5] = 0.12875396253933622767551578486;
+    w[6] = 0.14260670217360661177574610944;
+    w[7] = 0.15276604206585966677885540090;
+    w[8] = 0.15896884339395434764995643946;
+    w[9] = 0.16105444984878369597916362532;
+    w[10] = 0.15896884339395434764995643946;
+    w[11] = 0.15276604206585966677885540090;
+    w[12] = 0.14260670217360661177574610944;
+    w[13] = 0.12875396253933622767551578486;
+    w[14] = 0.111566645547333994716023901682;
+    w[15] = 0.091490021622449999464462094124;
+    w[16] = 0.069044542737641226580708258006;
+    w[17] = 0.044814226765699600332838157402;
+    w[18] = 0.019461788229726477036312041464;
+  }
+  else if ( n == 20 )
+  {
+    w[0] = 0.017614007139152118311861962352;
+    w[1] = 0.040601429800386941331039952275;
+    w[2] = 0.062672048334109063569506535187;
+    w[3] = 0.08327674157670474872475814322;
+    w[4] = 0.10193011981724043503675013548;
+    w[5] = 0.11819453196151841731237737771;
+    w[6] = 0.13168863844917662689849449975;
+    w[7] = 0.14209610931838205132929832507;
+    w[8] = 0.14917298647260374678782873700;
+    w[9] = 0.15275338713072585069808433195;
+    w[10] = 0.15275338713072585069808433195;
+    w[11] = 0.14917298647260374678782873700;
+    w[12] = 0.14209610931838205132929832507;
+    w[13] = 0.13168863844917662689849449975;
+    w[14] = 0.11819453196151841731237737771;
+    w[15] = 0.10193011981724043503675013548;
+    w[16] = 0.08327674157670474872475814322;
+    w[17] = 0.062672048334109063569506535187;
+    w[18] = 0.040601429800386941331039952275;
+    w[19] = 0.017614007139152118311861962352;
+  }
+  else if ( n == 21 )
+  {
+    w[ 0] =   0.016017228257774333324224616858;
+    w[ 1] =   0.036953789770852493799950668299;
+    w[ 2] =   0.057134425426857208283635826472;
+    w[ 3] =   0.076100113628379302017051653300;
+    w[ 4] =   0.093444423456033861553289741114;
+    w[ 5] =   0.108797299167148377663474578070;
+    w[ 6] =   0.12183141605372853419536717713;
+    w[ 7] =   0.13226893863333746178105257450;
+    w[ 8] =   0.13988739479107315472213342387;
+    w[9] =   0.14452440398997005906382716655;
+    w[10] =   0.14608113364969042719198514768;
+    w[11] =   0.14452440398997005906382716655;
+    w[12] =   0.13988739479107315472213342387;
+    w[13] =   0.13226893863333746178105257450;
+    w[14] =   0.12183141605372853419536717713;
+    w[15] =   0.108797299167148377663474578070;
+    w[16] =   0.093444423456033861553289741114;
+    w[17] =   0.076100113628379302017051653300;
+    w[18] =   0.057134425426857208283635826472;
+    w[19] =   0.036953789770852493799950668299;
+    w[20] =   0.016017228257774333324224616858;
+  }
+  else if ( n == 22 )
+  {
+    w[0] = 0.014627995298272200684991098047;
+    w[1] = 0.033774901584814154793302246866;
+    w[2] = 0.052293335152683285940312051273;
+    w[3] = 0.06979646842452048809496141893;
+    w[4] = 0.08594160621706772741444368137;
+    w[5] = 0.10041414444288096493207883783;
+    w[6] = 0.11293229608053921839340060742;
+    w[7] = 0.12325237681051242428556098615;
+    w[8] = 0.13117350478706237073296499253;
+    w[9] = 0.13654149834601517135257383123;
+    w[10] = 0.13925187285563199337541024834;
+    w[11] = 0.13925187285563199337541024834;
+    w[12] = 0.13654149834601517135257383123;
+    w[13] = 0.13117350478706237073296499253;
+    w[14] = 0.12325237681051242428556098615;
+    w[15] = 0.11293229608053921839340060742;
+    w[16] = 0.10041414444288096493207883783;
+    w[17] = 0.08594160621706772741444368137;
+    w[18] = 0.06979646842452048809496141893;
+    w[19] = 0.052293335152683285940312051273;
+    w[20] = 0.033774901584814154793302246866;
+    w[21] = 0.014627995298272200684991098047;
+  }
+  else if ( n == 23 )
+  {
+    w[0] = 0.013411859487141772081309493459;
+    w[1] = 0.030988005856979444310694219642;
+    w[2] = 0.048037671731084668571641071632;
+    w[3] = 0.064232421408525852127169615159;
+    w[4] = 0.079281411776718954922892524742;
+    w[5] = 0.092915766060035147477018617370;
+    w[6] = 0.104892091464541410074086185015;
+    w[7] = 0.11499664022241136494164351293;
+    w[8] = 0.12304908430672953046757840067;
+    w[9] = 0.12890572218808214997859533940;
+    w[10] = 0.13246203940469661737164246470;
+    w[11] = 0.13365457218610617535145711055;
+    w[12] = 0.13246203940469661737164246470;
+    w[13] = 0.12890572218808214997859533940;
+    w[14] = 0.12304908430672953046757840067;
+    w[15] = 0.11499664022241136494164351293;
+    w[16] = 0.104892091464541410074086185015;
+    w[17] = 0.092915766060035147477018617370;
+    w[18] = 0.079281411776718954922892524742;
+    w[19] = 0.064232421408525852127169615159;
+    w[20] = 0.048037671731084668571641071632;
+    w[21] = 0.030988005856979444310694219642;
+    w[22] = 0.013411859487141772081309493459;
+  }
+  else if ( n == 24 )
+  {
+    w[0] = 0.012341229799987199546805667070;
+    w[1] = 0.028531388628933663181307815952;
+    w[2] = 0.044277438817419806168602748211;
+    w[3] = 0.059298584915436780746367758500;
+    w[4] = 0.07334648141108030573403361525;
+    w[5] = 0.08619016153195327591718520298;
+    w[6] = 0.09761865210411388826988066446;
+    w[7] = 0.10744427011596563478257734245;
+    w[8] = 0.11550566805372560135334448391;
+    w[9] = 0.12167047292780339120446315348;
+    w[10] = 0.12583745634682829612137538251;
+    w[11] = 0.12793819534675215697405616522;
+    w[12] = 0.12793819534675215697405616522;
+    w[13] = 0.12583745634682829612137538251;
+    w[14] = 0.12167047292780339120446315348;
+    w[15] = 0.11550566805372560135334448391;
+    w[16] = 0.10744427011596563478257734245;
+    w[17] = 0.09761865210411388826988066446;
+    w[18] = 0.08619016153195327591718520298;
+    w[19] = 0.07334648141108030573403361525;
+    w[20] = 0.059298584915436780746367758500;
+    w[21] = 0.044277438817419806168602748211;
+    w[22] = 0.028531388628933663181307815952;
+    w[23] = 0.012341229799987199546805667070;
+  }
+  else if ( n == 25 )
+  {
+    w[0] = 0.0113937985010262879479029641132;
+    w[1] = 0.026354986615032137261901815295;
+    w[2] = 0.040939156701306312655623487712;
+    w[3] = 0.054904695975835191925936891541;
+    w[4] = 0.068038333812356917207187185657;
+    w[5] = 0.080140700335001018013234959669;
+    w[6] = 0.091028261982963649811497220703;
+    w[7] = 0.100535949067050644202206890393;
+    w[8] = 0.108519624474263653116093957050;
+    w[9] = 0.11485825914571164833932554587;
+    w[10] = 0.11945576353578477222817812651;
+    w[11] = 0.12224244299031004168895951895;
+    w[12] = 0.12317605372671545120390287308;
+    w[13] = 0.12224244299031004168895951895;
+    w[14] = 0.11945576353578477222817812651;
+    w[15] = 0.11485825914571164833932554587;
+    w[16] = 0.108519624474263653116093957050;
+    w[17] = 0.100535949067050644202206890393;
+    w[18] = 0.091028261982963649811497220703;
+    w[19] = 0.080140700335001018013234959669;
+    w[20] = 0.068038333812356917207187185657;
+    w[21] = 0.054904695975835191925936891541;
+    w[22] = 0.040939156701306312655623487712;
+    w[23] = 0.026354986615032137261901815295;
+    w[24] = 0.0113937985010262879479029641132;
+  }
+  else if ( n == 26 )
+  {
+    w[0] = 0.010551372617343007155651187685;
+    w[1] = 0.024417851092631908789615827520;
+    w[2] = 0.037962383294362763950303141249;
+    w[3] = 0.050975825297147811998319900724;
+    w[4] = 0.063274046329574835539453689907;
+    w[5] = 0.07468414976565974588707579610;
+    w[6] = 0.08504589431348523921044776508;
+    w[7] = 0.09421380035591414846366488307;
+    w[8] = 0.10205916109442542323841407025;
+    w[9] = 0.10847184052857659065657942673;
+    w[10] = 0.11336181654631966654944071844;
+    w[11] = 0.11666044348529658204466250754;
+    w[12] = 0.11832141527926227651637108570;
+    w[13] = 0.11832141527926227651637108570;
+    w[14] = 0.11666044348529658204466250754;
+    w[15] = 0.11336181654631966654944071844;
+    w[16] = 0.10847184052857659065657942673;
+    w[17] = 0.10205916109442542323841407025;
+    w[18] = 0.09421380035591414846366488307;
+    w[19] = 0.08504589431348523921044776508;
+    w[20] = 0.07468414976565974588707579610;
+    w[21] = 0.063274046329574835539453689907;
+    w[22] = 0.050975825297147811998319900724;
+    w[23] = 0.037962383294362763950303141249;
+    w[24] = 0.024417851092631908789615827520;
+    w[25] = 0.010551372617343007155651187685;
+  }
+  else if ( n == 27 )
+  {
+    w[0] = 0.0097989960512943602611500550912;
+    w[1] = 0.022686231596180623196034206447;
+    w[2] = 0.035297053757419711022578289305;
+    w[3] = 0.047449412520615062704096710114;
+    w[4] = 0.058983536859833599110300833720;
+    w[5] = 0.069748823766245592984322888357;
+    w[6] = 0.079604867773057771263074959010;
+    w[7] = 0.088423158543756950194322802854;
+    w[8] = 0.096088727370028507565652646558;
+    w[9] = 0.102501637817745798671247711533;
+    w[10] = 0.107578285788533187212162984427;
+    w[11] = 0.111252488356845192672163096043;
+    w[12] = 0.113476346108965148620369948092;
+    w[13] = 0.11422086737895698904504573690;
+    w[14] = 0.113476346108965148620369948092;
+    w[15] = 0.111252488356845192672163096043;
+    w[16] = 0.107578285788533187212162984427;
+    w[17] = 0.102501637817745798671247711533;
+    w[18] = 0.096088727370028507565652646558;
+    w[19] = 0.088423158543756950194322802854;
+    w[20] = 0.079604867773057771263074959010;
+    w[21] = 0.069748823766245592984322888357;
+    w[22] = 0.058983536859833599110300833720;
+    w[23] = 0.047449412520615062704096710114;
+    w[24] = 0.035297053757419711022578289305;
+    w[25] = 0.022686231596180623196034206447;
+    w[26] = 0.0097989960512943602611500550912;
+  }
+  else if ( n == 28 )
+  {
+    w[0] = 0.009124282593094517738816153923;
+    w[1] = 0.021132112592771259751500380993;
+    w[2] = 0.032901427782304379977630819171;
+    w[3] = 0.044272934759004227839587877653;
+    w[4] = 0.055107345675716745431482918227;
+    w[5] = 0.06527292396699959579339756678;
+    w[6] = 0.07464621423456877902393188717;
+    w[7] = 0.08311341722890121839039649824;
+    w[8] = 0.09057174439303284094218603134;
+    w[9] = 0.09693065799792991585048900610;
+    w[10] = 0.10211296757806076981421663851;
+    w[11] = 0.10605576592284641791041643700;
+    w[12] = 0.10871119225829413525357151930;
+    w[13] = 0.11004701301647519628237626560;
+    w[14] = 0.11004701301647519628237626560;
+    w[15] = 0.10871119225829413525357151930;
+    w[16] = 0.10605576592284641791041643700;
+    w[17] = 0.10211296757806076981421663851;
+    w[18] = 0.09693065799792991585048900610;
+    w[19] = 0.09057174439303284094218603134;
+    w[20] = 0.08311341722890121839039649824;
+    w[21] = 0.07464621423456877902393188717;
+    w[22] = 0.06527292396699959579339756678;
+    w[23] = 0.055107345675716745431482918227;
+    w[24] = 0.044272934759004227839587877653;
+    w[25] = 0.032901427782304379977630819171;
+    w[26] = 0.021132112592771259751500380993;
+    w[27] = 0.009124282593094517738816153923;
+  }
+  else if ( n == 29 )
+  {
+    w[0] = 0.0085169038787464096542638133022;
+    w[1] = 0.019732085056122705983859801640;
+    w[2] = 0.030740492202093622644408525375;
+    w[3] = 0.041402062518682836104830010114;
+    w[4] = 0.051594826902497923912594381180;
+    w[5] = 0.061203090657079138542109848024;
+    w[6] = 0.070117933255051278569581486949;
+    w[7] = 0.078238327135763783828144888660;
+    w[8] = 0.085472257366172527545344849297;
+    w[9] = 0.091737757139258763347966411077;
+    w[10] = 0.096963834094408606301900074883;
+    w[11] = 0.101091273759914966121820546907;
+    w[12] = 0.104073310077729373913328471285;
+    w[13] = 0.105876155097320941406591327852;
+    w[14] = 0.10647938171831424424651112691;
+    w[15] = 0.105876155097320941406591327852;
+    w[16] = 0.104073310077729373913328471285;
+    w[17] = 0.101091273759914966121820546907;
+    w[18] = 0.096963834094408606301900074883;
+    w[19] = 0.091737757139258763347966411077;
+    w[20] = 0.085472257366172527545344849297;
+    w[21] = 0.078238327135763783828144888660;
+    w[22] = 0.070117933255051278569581486949;
+    w[23] = 0.061203090657079138542109848024;
+    w[24] = 0.051594826902497923912594381180;
+    w[25] = 0.041402062518682836104830010114;
+    w[26] = 0.030740492202093622644408525375;
+    w[27] = 0.019732085056122705983859801640;
+    w[28] = 0.0085169038787464096542638133022;
+  }
+  else if ( n == 30 )
+  {
+    w[0] = 0.007968192496166605615465883475;
+    w[1] = 0.018466468311090959142302131912;
+    w[2] = 0.028784707883323369349719179611;
+    w[3] = 0.038799192569627049596801936446;
+    w[4] = 0.048402672830594052902938140423;
+    w[5] = 0.057493156217619066481721689402;
+    w[6] = 0.06597422988218049512812851512;
+    w[7] = 0.07375597473770520626824385002;
+    w[8] = 0.08075589522942021535469493846;
+    w[9] = 0.08689978720108297980238753072;
+    w[10] = 0.09212252223778612871763270709;
+    w[11] = 0.09636873717464425963946862635;
+    w[12] = 0.09959342058679526706278028210;
+    w[13] = 0.10176238974840550459642895217;
+    w[14] = 0.10285265289355884034128563671;
+    w[15] = 0.10285265289355884034128563671;
+    w[16] = 0.10176238974840550459642895217;
+    w[17] = 0.09959342058679526706278028210;
+    w[18] = 0.09636873717464425963946862635;
+    w[19] = 0.09212252223778612871763270709;
+    w[20] = 0.08689978720108297980238753072;
+    w[21] = 0.08075589522942021535469493846;
+    w[22] = 0.07375597473770520626824385002;
+    w[23] = 0.06597422988218049512812851512;
+    w[24] = 0.057493156217619066481721689402;
+    w[25] = 0.048402672830594052902938140423;
+    w[26] = 0.038799192569627049596801936446;
+    w[27] = 0.028784707883323369349719179611;
+    w[28] = 0.018466468311090959142302131912;
+    w[29] = 0.007968192496166605615465883475;
+  }
+  else if ( n == 31 )
+  {
+    w[0] = 0.0074708315792487758586968750322;
+    w[1] = 0.017318620790310582463157996087;
+    w[2] = 0.027009019184979421800608708092;
+    w[3] = 0.036432273912385464024392010468;
+    w[4] = 0.045493707527201102902315857895;
+    w[5] = 0.054103082424916853711666259087;
+    w[6] = 0.062174786561028426910343543687;
+    w[7] = 0.069628583235410366167756126255;
+    w[8] = 0.076390386598776616426357674901;
+    w[9] = 0.082392991761589263903823367432;
+    w[10] = 0.087576740608477876126198069695;
+    w[11] = 0.091890113893641478215362871607;
+    w[12] = 0.095290242912319512807204197488;
+    w[13] = 0.097743335386328725093474010979;
+    w[14] = 0.099225011226672307874875514429;
+    w[15] = 0.09972054479342645142753383373;
+    w[16] = 0.099225011226672307874875514429;
+    w[17] = 0.097743335386328725093474010979;
+    w[18] = 0.095290242912319512807204197488;
+    w[19] = 0.091890113893641478215362871607;
+    w[20] = 0.087576740608477876126198069695;
+    w[21] = 0.082392991761589263903823367432;
+    w[22] = 0.076390386598776616426357674901;
+    w[23] = 0.069628583235410366167756126255;
+    w[24] = 0.062174786561028426910343543687;
+    w[25] = 0.054103082424916853711666259087;
+    w[26] = 0.045493707527201102902315857895;
+    w[27] = 0.036432273912385464024392010468;
+    w[28] = 0.027009019184979421800608708092;
+    w[29] = 0.017318620790310582463157996087;
+    w[30] = 0.0074708315792487758586968750322;
+  }
+  else if ( n == 32 )
+  {
+    w[0] = 0.007018610009470096600407063739;
+    w[1] = 0.016274394730905670605170562206;
+    w[2] = 0.025392065309262059455752589789;
+    w[3] = 0.034273862913021433102687732252;
+    w[4] = 0.042835898022226680656878646606;
+    w[5] = 0.050998059262376176196163244690;
+    w[6] = 0.058684093478535547145283637300;
+    w[7] = 0.06582222277636184683765006371;
+    w[8] = 0.07234579410884850622539935648;
+    w[9] = 0.07819389578707030647174091883;
+    w[10] = 0.08331192422694675522219907460;
+    w[11] = 0.08765209300440381114277146275;
+    w[12] = 0.09117387869576388471286857711;
+    w[13] = 0.09384439908080456563918023767;
+    w[14] = 0.09563872007927485941908200220;
+    w[15] = 0.09654008851472780056676483006;
+    w[16] = 0.09654008851472780056676483006;
+    w[17] = 0.09563872007927485941908200220;
+    w[18] = 0.09384439908080456563918023767;
+    w[19] = 0.09117387869576388471286857711;
+    w[20] = 0.08765209300440381114277146275;
+    w[21] = 0.08331192422694675522219907460;
+    w[22] = 0.07819389578707030647174091883;
+    w[23] = 0.07234579410884850622539935648;
+    w[24] = 0.06582222277636184683765006371;
+    w[25] = 0.058684093478535547145283637300;
+    w[26] = 0.050998059262376176196163244690;
+    w[27] = 0.042835898022226680656878646606;
+    w[28] = 0.034273862913021433102687732252;
+    w[29] = 0.025392065309262059455752589789;
+    w[30] = 0.016274394730905670605170562206;
+    w[31] = 0.007018610009470096600407063739;
+  }
+  else if ( n == 33 )
+  {
+    w[0] = 0.0066062278475873780586492352085;
+    w[1] = 0.015321701512934676127945768534;
+    w[2] = 0.023915548101749480350533257529;
+    w[3] = 0.032300358632328953281561447250;
+    w[4] = 0.040401541331669591563409790527;
+    w[5] = 0.048147742818711695670146880138;
+    w[6] = 0.055470846631663561284944495439;
+    w[7] = 0.062306482530317480031627725771;
+    w[8] = 0.068594572818656712805955073015;
+    w[9] = 0.074279854843954149342472175919;
+    w[10] = 0.079312364794886738363908384942;
+    w[11] = 0.083647876067038707613928014518;
+    w[12] = 0.087248287618844337607281670945;
+    w[13] = 0.090081958660638577239743705500;
+    w[14] = 0.092123986643316846213240977717;
+    w[15] = 0.093356426065596116160999126274;
+    w[16] = 0.09376844616020999656730454155;
+    w[17] = 0.093356426065596116160999126274;
+    w[18] = 0.092123986643316846213240977717;
+    w[19] = 0.090081958660638577239743705500;
+    w[20] = 0.087248287618844337607281670945;
+    w[21] = 0.083647876067038707613928014518;
+    w[22] = 0.079312364794886738363908384942;
+    w[23] = 0.074279854843954149342472175919;
+    w[24] = 0.068594572818656712805955073015;
+    w[25] = 0.062306482530317480031627725771;
+    w[26] = 0.055470846631663561284944495439;
+    w[27] = 0.048147742818711695670146880138;
+    w[28] = 0.040401541331669591563409790527;
+    w[29] = 0.032300358632328953281561447250;
+    w[30] = 0.023915548101749480350533257529;
+    w[31] = 0.015321701512934676127945768534;
+    w[32] = 0.0066062278475873780586492352085;
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "LEGENDRE_LOOKUP_WEIGHTS - Fatal error!\n";
+    std::cerr << "  Illegal value of N = " << n << "\n";
+    std::cerr << "  Legal values are 1 through 33.\n";
+    std::exit ( 1 );
+  }
+  return;
+}
+//****************************************************************************80
+
+double *legendre_zeros ( int order )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEGENDRE_ZEROS returns the zeros of the Legendre polynomial of degree N.
+//
+//  Discussion:
+//
+//    Only the roots in [0,1] are computed directly: each gets an asymptotic
+//    cosine starting guess, which is polished with Newton steps using
+//    derivatives of the Legendre polynomial obtained from its three-term
+//    recurrence and differential equation.  The negative roots then follow
+//    from the symmetry P_n(-x) = (-1)^n P_n(x), and the full set is
+//    returned in ascending order.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    17 June 2011
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Philip Davis, Philip Rabinowitz.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Philip Davis, Philip Rabinowitz,
+//    Methods of Numerical Integration,
+//    Second Edition,
+//    Dover, 2007,
+//    ISBN: 0486453391,
+//    LC: QA299.3.D28.
+//
+//  Parameters:
+//
+//    Input, int ORDER, the order.
+//    ORDER must be greater than 0.
+//
+//    Output, double LEGENDRE_ZEROS[ORDER], the zeros.
+//    The caller owns the returned array and must delete[] it.
+//
+{
+  double d1;
+  double d2pn;
+  double d3pn;
+  double d4pn;
+  double dp;
+  double dpn;
+  double e1;
+  double fx;
+  double h;
+  int i;
+  int iback;
+  int k;
+  int m;
+  int mp1mi;
+  int ncopy;
+  int nmove;
+  double p;
+  double pi = 3.141592653589793;
+  double pk;
+  double pkm1;
+  double pkp1;
+  double t;
+  double u;
+  double v;
+  double x0;
+  double *xtab;
+  double xtemp;
+
+  xtab = new double[order];
+
+//
+//  E1 = N*(N+1), the eigenvalue in the Legendre differential equation;
+//  it appears in the higher-derivative recurrences below.
+//
+  e1 = ( double ) ( order * ( order + 1 ) );
+
+//
+//  M roots lie in [0,1]; the rest follow by symmetry.
+//
+  m = ( order + 1 ) / 2;
+
+  for ( i = 1; i <= m; i++ )
+  {
+    mp1mi = m + 1 - i;
+
+//
+//  Asymptotic (cosine) initial approximation X0 to the I-th largest root.
+//
+    t = ( double ) ( 4 * i - 1 ) * pi / ( double ) ( 4 * order + 2 );
+
+    x0 = std::cos ( t ) * ( 1.0 - ( 1.0 - 1.0 / ( double ) ( order ) )
+      / ( double ) ( 8 * order * order ) );
+
+//
+//  Three-term recurrence at X0; on exit PK = P_ORDER(X0), PKM1 = P_(ORDER-1)(X0).
+//
+    pkm1 = 1.0;
+    pk = x0;
+
+    for ( k = 2; k <= order; k++ )
+    {
+      pkp1 = 2.0 * x0 * pk - pkm1 - ( x0 * pk - pkm1 ) / ( double ) ( k );
+      pkm1 = pk;
+      pk = pkp1;
+    }
+
+//
+//  D1 = (1-X0^2) * P'_ORDER(X0), via the derivative identity
+//  (1-x^2) P'_n(x) = n * ( P_(n-1)(x) - x * P_n(x) ).
+//
+    d1 = ( double ) ( order ) * ( pkm1 - x0 * pk );
+
+    dpn = d1 / ( 1.0 - x0 * x0 );
+
+//
+//  Second through fourth derivatives at X0, from successive derivatives
+//  of the Legendre differential equation (1-x^2) P'' = 2x P' - E1 * P.
+//
+    d2pn = ( 2.0 * x0 * dpn - e1 * pk ) / ( 1.0 - x0 * x0 );
+
+    d3pn = ( 4.0 * x0 * d2pn + ( 2.0 - e1 ) * dpn ) / ( 1.0 - x0 * x0 );
+
+    d4pn = ( 6.0 * x0 * d3pn + ( 6.0 - e1 ) * d2pn ) / ( 1.0 - x0 * x0 );
+
+    u = pk / dpn;
+    v = d2pn / dpn;
+//
+//  Initial approximation H:
+//
+    h = - u * ( 1.0 + 0.5 * u * ( v + u * ( v * v - d3pn / ( 3.0 * dpn ) ) ) );
+//
+//  Refine H using one step of Newton's method:
+//  P and DP are Taylor expansions of P_ORDER and P'_ORDER about X0.
+//
+    p = pk + h * ( dpn + 0.5 * h * ( d2pn + h / 3.0
+      * ( d3pn + 0.25 * h * d4pn ) ) );
+
+    dp = dpn + h * ( d2pn + 0.5 * h * ( d3pn + h * d4pn / 3.0 ) );
+
+    h = h - p / dp;
+
+    xtemp = x0 + h;
+
+//
+//  Store in slots [0..M-1]; the largest root (I=1) lands at index M-1,
+//  so the nonnegative roots end up in ascending order.
+//
+    xtab[mp1mi-1] = xtemp;
+
+//
+//  NOTE(review): FX is computed but never used in this routine; presumably
+//  retained from the companion quadrature-weight computation -- confirm.
+//
+    fx = d1 - h * e1 * ( pk + 0.5 * h * ( dpn + h / 3.0
+      * ( d2pn + 0.25 * h * ( d3pn + 0.2 * h * d4pn ) ) ) );
+  }
+
+//
+//  For odd ORDER, the middle root is exactly zero.
+//
+  if ( ( order % 2 ) == 1 )
+  {
+    xtab[0] = 0.0;
+  }
+//
+//  Shift the data up: move the M computed roots from the low slots into
+//  the top of the array, freeing the first NCOPY slots.
+//
+  nmove = ( order + 1 ) / 2;
+  ncopy = order - nmove;
+
+  for ( i = 1; i <= nmove; i++ )
+  {
+    iback = order + 1 - i;
+    xtab[iback-1] = xtab[iback-ncopy-1];
+  }
+//
+//  Reflect values for the negative abscissas.
+//
+  for ( i = 1; i <= order - nmove; i++ )
+  {
+    xtab[i-1] = - xtab[order-i];
+  }
+
+  return xtab;
+}
+//****************************************************************************80
+
+void level_growth_to_order ( int dim_num, int level[], int rule[],
+  int growth[], int order[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_GROWTH_TO_ORDER: convert Level and Growth to Order.
+//
+//  Discussion:
+//
+//    This function is given level, rule, and growth information
+//    for each dimension of a quadrature rule, and determines the
+//    corresponding order of the rule in each dimension.
+//
+//    This is a revised version of LEVEL_GROWTH_TO_ORDER.
+//
+//    In particular, it revises the interpretation of the RULE vector as
+//    far as the values 10, 11, and 12 are concerned.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    16 October 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int DIM_NUM, the spatial dimension.
+//
+//    Input, int LEVEL[DIM_NUM], the 1D levels.
+//
+//    Input, int RULE[DIM_NUM], the rule in each dimension.
+//     1, "CC",  Clenshaw Curtis, Closed Fully Nested.
+//     2, "F2",  Fejer Type 2, Open Fully Nested.
+//     3, "GP",  Gauss Patterson, Open Fully Nested.
+//     4, "GL",  Gauss Legendre, Open Weakly Nested.
+//     5, "GH",  Gauss Hermite, Open Weakly Nested.
+//     6, "GGH", Generalized Gauss Hermite, Open Weakly Nested.
+//     7, "LG",  Gauss Laguerre, Open Non Nested.
+//     8, "GLG", Generalized Gauss Laguerre, Open Non Nested.
+//     9, "GJ",  Gauss Jacobi, Open Non Nested.
+//    10, "HGK", Hermite Genz-Keister, Open Fully Nested.
+//    11, "UO",  User supplied Open, presumably Non Nested.
+//    12, "UC",  User supplied Closed, presumably Non Nested.
+//
+//    Input, int GROWTH[DIM_NUM], the desired growth in each dimension.
+//    0, "DF", default growth associated with this quadrature rule;
+//    1, "SL", slow linear, L+1;
+//    2  "SO", slow linear odd, O=1+2((L+1)/2)
+//    3, "ML", moderate linear, 2L+1;
+//    4, "SE", slow exponential;
+//    5, "ME", moderate exponential;
+//    6, "FE", full exponential.
+//
+//    Output, int ORDER[DIM_NUM], the 1D orders (number of points).
+//
+{
+  int dim;
+  int l;
+  int o;
+  static int o_hgk[6] = { 1, 3, 9, 19, 35, 43 };
+  int p;
+  static int p_hgk[6] = { 1, 5, 15, 29, 51, 67 };
+//
+//  Check the input.
+//
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+    if ( level[dim] < 0 )
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+      std::cerr << "  Negative value of LEVEL[DIM]!\n";
+      std::cerr << "  LEVEL[" << dim << "] = " << level[dim] << "\n";
+      std::exit ( 1 );
+    }
+
+    if ( rule[dim] < 1 || 12 < rule[dim] )
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+      std::cerr << "  Illegal value of RULE[DIM]!\n";
+      std::cerr << "  RULE[" << dim << "] = " << rule[dim] << "\n";
+      std::exit ( 1 );
+    }
+
+    if ( growth[dim] < 0 || 6 < growth[dim] )
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+      std::cerr << "  Illegal value of GROWTH[DIM]!\n";
+      std::cerr << "  GROWTH[" << dim << "] = " << growth[dim] << "\n";
+      std::exit ( 1 );
+    }
+  }
+//
+//  Compute the order vector.
+//
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+//
+//  CC
+//  Default is Moderate Exponential Growth.
+//
+    if ( rule[dim] == 1 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        if ( level[dim] == 0 )
+        {
+          o = 1;
+        }
+        else
+        {
+          o = 2;
+          while ( o < 2 * level[dim] + 1 )
+          {
+            o = 2 * ( o - 1 ) + 1;
+          }
+        }
+      }
+      else if ( growth[dim] == 5 || growth[dim] == 0 )
+      {
+        if ( level[dim] == 0 )
+        {
+          o = 1;
+        }
+        else
+        {
+          o = 2;
+          while ( o < 4 * level[dim] + 1 )
+          {
+            o = 2 * ( o - 1 ) + 1;
+          }
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        if ( level[dim] == 0 )
+        {
+          o = 1;
+        }
+        else
+        {
+          o = webbur::i4_power ( 2, level[dim] ) + 1;
+        }
+      }
+    }
+//
+//  F2
+//  Default is Moderate Exponential Growth.
+//
+    else if ( rule[dim] == 2 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        o = 1;
+        while ( o < 2 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 5 || growth[dim] == 0 )
+      {
+        o = 1;
+        while ( o < 4 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  GP
+//  Default is Moderate Exponential Growth.
+//
+    else if ( rule[dim] == 3 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        std::cerr << "\n";
+        std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+        std::cerr << "  Growth rate 1 for rule 3 not available!\n";
+        std::exit ( 1 );
+      }
+      else if ( growth[dim] == 2 )
+      {
+        std::cerr << "\n";
+        std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+        std::cerr << "  Growth rate 2 for rule 3 not available!\n";
+        std::exit ( 1 );
+      }
+      else if ( growth[dim] == 3 )
+      {
+        std::cerr << "\n";
+        std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+        std::cerr << "  Growth rate 3 for rule 3 not available!\n";
+        std::exit ( 1 );
+      }
+      else if ( growth[dim] == 4 )
+      {
+        if ( level[dim] == 0 )
+        {
+          o = 1;
+        }
+        else
+        {
+          p = 5;
+          o = 3;
+          while ( p < 2 * level[dim] + 1 )
+          {
+            p = 2 * p + 1;
+            o = 2 * o + 1;
+          }
+        }
+      }
+      else if ( growth[dim] == 5 || growth[dim] == 0 )
+      {
+        if ( level[dim] == 0 )
+        {
+          o = 1;
+        }
+        else
+        {
+          p = 5;
+          o = 3;
+          while ( p < 4 * level[dim] + 1 )
+          {
+            p = 2 * p + 1;
+            o = 2 * o + 1;
+          }
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  GL
+//  Default is Moderate Linear Growth.
+//
+    else if ( rule[dim] == 4 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 || growth[dim] == 0 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 2 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 5 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 4 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  GH
+//  Default is Moderate Linear Growth.
+//
+    else if ( rule[dim] == 5 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 || growth[dim] == 0 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 2 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 5 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 4 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  GGH
+//  Default is Moderate Linear Growth.
+//
+    else if ( rule[dim] == 6 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 || growth[dim] == 0 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 2 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 5 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 4 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  LG
+//  Default is Moderate Linear Growth.
+//
+    else if ( rule[dim] == 7 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 || growth[dim] == 0 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 2 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 5 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 4 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  GLG
+//  Default is Moderate Linear Growth.
+//
+    else if ( rule[dim] == 8 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 || growth[dim] == 0 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 2 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 5 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 4 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  GJ
+//  Default is Moderate Linear Growth.
+//
+    else if ( rule[dim] == 9 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 || growth[dim] == 0 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 2 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 5 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 4 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  HGK
+//  Default is Moderate Exponential Growth.
+//  Exponential growth is interpreted to mean simply take successive rules.
+//
+    else if ( rule[dim] == 10 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        std::cerr << "\n";
+        std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+        std::cerr << "  Growth rate 1 for rule 10 not available!\n";
+        std::exit ( 1 );
+      }
+      else if ( growth[dim] == 2 )
+      {
+        std::cerr << "\n";
+        std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+        std::cerr << "  Growth rate 2 for rule 10 not available!\n";
+        std::exit ( 1 );
+      }
+      else if ( growth[dim] == 3 )
+      {
+        std::cerr << "\n";
+        std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+        std::cerr << "  Growth rate 3 for rule 10 not available!\n";
+        std::exit ( 1 );
+      }
+      else if ( growth[dim] == 4 )
+      {
+        l = 0;
+        p = p_hgk[l];
+        o = o_hgk[l];
+        while ( p < 2 * level[dim] + 1 )
+        {
+          l = l + 1;
+          if ( 5 < l )
+          {
+            std::cerr << "\n";
+            std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+            std::cerr << "  Hermite Genz-Keister maximum level exceeded.\n";
+            std::exit ( 1 );
+          }
+          p = p_hgk[l];
+          o = o_hgk[l];
+        }
+      }
+      else if ( growth[dim] == 5 || growth[dim] == 0 )
+      {
+        l = 0;
+        p = p_hgk[l];
+        o = o_hgk[l];
+        while ( p < 4 * level[dim] + 1 )
+        {
+          l = l + 1;
+          if ( 5 < l )
+          {
+            std::cerr << "\n";
+            std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+            std::cerr << "  Hermite Genz-Keister maximum level exceeded.\n";
+            std::exit ( 1 );
+          }
+          p = p_hgk[l];
+          o = o_hgk[l];
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        l = level[dim];
+        l = webbur::i4_max ( l, 0 );
+        if ( 5 < l )
+        {
+          std::cerr << "\n";
+          std::cerr << "LEVEL_GROWTH_TO_ORDER - Fatal error!\n";
+          std::cerr << "  Hermite Genz-Keister maximum level exceeded.\n";
+          std::exit ( 1 );
+        }
+        o = o_hgk[l];
+      }
+    }
+//
+//  UO
+//  Default is Moderate Linear Growth.
+//  We assume the rule is of OPEN type and that it
+//  has a precision typical of Gauss rules.
+//
+    else if ( rule[dim] == 11 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 || growth[dim] == 0 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 2 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 5 )
+      {
+        o = 1;
+        while ( 2 * o - 1 < 4 * level[dim] + 1 )
+        {
+          o = 2 * o + 1;
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        o = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+      }
+    }
+//
+//  UC
+//  Default is Moderate Linear Growth.
+//  We assume the rule is of CLOSED type and that it
+//  has a precision typical of Clenshaw-Curtis rules.
+//
+    else if ( rule[dim] == 12 )
+    {
+      if ( growth[dim] == 1 )
+      {
+        o = level[dim] + 1;
+      }
+      else if ( growth[dim] == 2 )
+      {
+        o = 2 * ( ( level[dim] + 1 ) / 2 ) + 1;
+      }
+      else if ( growth[dim] == 3 || growth[dim] == 0 )
+      {
+        o = 2 * level[dim] + 1;
+      }
+      else if ( growth[dim] == 4 )
+      {
+        if ( level[dim] == 0 )
+        {
+          o = 1;
+        }
+        else
+        {
+          o = 2;
+          while ( o < 2 * level[dim] + 1 )
+          {
+            o = 2 * ( o - 1 ) + 1;
+          }
+        }
+      }
+      else if ( growth[dim] == 5 )
+      {
+        if ( level[dim] == 0 )
+        {
+          o = 1;
+        }
+        else
+        {
+          o = 2;
+          while ( o < 4 * level[dim] + 1 )
+          {
+            o = 2 * ( o - 1 ) + 1;
+          }
+        }
+      }
+      else if ( growth[dim] == 6 )
+      {
+        if ( level[dim] == 0 )
+        {
+          o = 1;
+        }
+        else
+        {
+          o = webbur::i4_power ( 2, level[dim] ) + 1;
+        }
+      }
+    }
+    order[dim] = o;
+  }
+  return;
+}
+//****************************************************************************80
+
+void level_to_order_default ( int dim_num, int level[], int rule[],
+  int order[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_TO_ORDER_DEFAULT: default growth.
+//
+//  Discussion:
+//
+//    This function uses:
+//
+//    * exponential growth rates for fully nested quadrature rules,
+//      ( "CC", "F2", "GP");
+//
+//    * linear growth rates for other rules.
+//      ( "GL", "GH", "GGH", "LG", "GLG", "GJ", "GW" ).
+//
+//    * slow exponential growth alternative for fully nested rules:
+//      ("CC_SE", "F2_SE", "GP_SE").
+//
+//    * moderate exponential growth alternative for fully nested rules:
+//      ("CC_ME", "F2_ME", "GP_ME").
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int DIM_NUM, the spatial dimension.
+//
+//    Input, int LEVEL[DIM_NUM], the 1D levels.
+//
+//    Input, int RULE[DIM_NUM], the rule in each dimension.
+//     1, "CC",  Clenshaw Curtis, Closed Fully Nested rule.
+//     2, "F2",  Fejer Type 2, Open Fully Nested rule.
+//     3, "GP",  Gauss Patterson, Open Fully Nested rule.
+//     4, "GL",  Gauss Legendre, Open Weakly Nested rule.
+//     5, "GH",  Gauss Hermite, Open Weakly Nested rule.
+//     6, "GGH", Generalized Gauss Hermite, Open Weakly Nested rule.
+//     7, "LG",  Gauss Laguerre, Open Non Nested rule.
+//     8, "GLG", Generalized Gauss Laguerre, Open Non Nested rule.
+//     9, "GJ",  Gauss Jacobi, Open Non Nested rule.
+//    10, "GW",  Golub Welsch, (presumed) Open Non Nested rule.
+//    11, "CC_SE", Clenshaw Curtis Slow Exponential, Closed Fully Nested rule.
+//    12, "F2_SE", Fejer Type 2 Slow Exponential, Open Fully Nested rule.
+//    13, "GP_SE", Gauss Patterson Slow Exponential, Open Fully Nested rule.
+//    14, "CC_ME", Clenshaw Curtis Moderate Exponential, Closed Fully Nested rule.
+//    15, "F2_ME", Fejer Type 2 Moderate Exponential, Open Fully Nested rule.
+//    16, "GP_ME", Gauss Patterson Moderate Exponential, Open Fully Nested rule.
+//    17, "CCN", Clenshaw Curtis Nested, Linear, Closed Fully Nested rule.
+//
+//    Output, int ORDER[DIM_NUM], the 1D orders (number of points).
+//
+{
+  int dim;
+  int o;  // candidate order for the current dimension
+  int p;  // polynomial precision tracker (used only by the Gauss Patterson branches)
+
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+    if ( level[dim] < 0 )
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_TO_ORDER_DEFAULT - Fatal error!\n";
+      std::cerr << "  Negative value of LEVEL[DIM]!\n";
+      std::cerr << "  LEVEL[" << dim << "] = " << level[dim] << "\n";
+      std::exit ( 1 );
+    }
+    else if ( rule[dim] == 1 )  // CC: closed exponential, O = 2^L + 1 (O = 1 at L = 0)
+    {
+      if ( level[dim] == 0 )
+      {
+        order[dim] = 1;
+      }
+      else
+      {
+        order[dim] = webbur::i4_power ( 2, level[dim] ) + 1;
+      }
+    }
+    else if ( rule[dim] == 2 )  // F2: open exponential, O = 2^(L+1) - 1
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 3 )  // GP: open exponential, O = 2^(L+1) - 1
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 4 )  // GL: linear, O = 2*L + 1
+    {
+      order[dim] = 2 * level[dim] + 1;
+    }
+    else if ( rule[dim] == 5 )  // GH: linear, O = 2*L + 1
+    {
+      order[dim] = 2 * level[dim] + 1;
+    }
+    else if ( rule[dim] == 6 )  // GGH: linear, O = 2*L + 1
+    {
+      order[dim] = 2 * level[dim] + 1;
+    }
+    else if ( rule[dim] == 7 )  // LG: linear, O = 2*L + 1
+    {
+      order[dim] = 2 * level[dim] + 1;
+    }
+    else if ( rule[dim] == 8 )  // GLG: linear, O = 2*L + 1
+    {
+      order[dim] = 2 * level[dim] + 1;
+    }
+    else if ( rule[dim] == 9 )  // GJ: linear, O = 2*L + 1
+    {
+      order[dim] = 2 * level[dim] + 1;
+    }
+    else if ( rule[dim] == 10 )  // GW: linear, O = 2*L + 1
+    {
+      order[dim] = 2 * level[dim] + 1;
+    }
+    else if ( rule[dim] == 11 )  // CC_SE: smallest nested CC order with O >= 2*L + 1
+    {
+      if ( level[dim] == 0 )
+      {
+        o = 1;
+      }
+      else
+      {
+        o = 2;
+        while ( o < 2 * level[dim] + 1 )
+        {
+          o = 2 * ( o - 1 ) + 1;  // next nested closed order: 3, 5, 9, 17, ...
+        }
+      }
+      order[dim] = o;
+    }
+    else if ( rule[dim] == 12 )  // F2_SE: smallest nested F2 order with O >= 2*L + 1
+    {
+      o = 1;
+      while ( o < 2 * level[dim] + 1 )
+      {
+        o = 2 * o + 1;  // next nested open order: 3, 7, 15, ...
+      }
+      order[dim] = o;
+    }
+    else if ( rule[dim] == 13 )  // GP_SE: advance (order o, precision p) until p >= 2*L + 1
+    {
+      if ( level[dim] == 0 )
+      {
+        order[dim] = 1;
+      }
+      else
+      {
+        p = 5;
+        o = 3;
+        while ( p < 2 * level[dim] + 1 )
+        {
+          p = 2 * p + 1;
+          o = 2 * o + 1;
+        }
+        order[dim] = o;
+      }
+    }
+    else if ( rule[dim] == 14 )  // CC_ME: like CC_SE but precision target 4*L + 1
+    {
+      if ( level[dim] == 0 )
+      {
+        o = 1;
+      }
+      else
+      {
+        o = 2;
+        while ( o < 4 * level[dim] + 1 )
+        {
+          o = 2 * ( o - 1 ) + 1;
+        }
+      }
+      order[dim] = o;
+    }
+    else if ( rule[dim] == 15 )  // F2_ME: like F2_SE but precision target 4*L + 1
+    {
+      o = 1;
+      while ( o < 4 * level[dim] + 1 )
+      {
+        o = 2 * o + 1;
+      }
+      order[dim] = o;
+    }
+    else if ( rule[dim] == 16 )  // GP_ME: like GP_SE but precision target 4*L + 1
+    {
+      if ( level[dim] == 0 )
+      {
+        order[dim] = 1;
+      }
+      else
+      {
+        p = 5;
+        o = 3;
+        while ( p < 4 * level[dim] + 1 )
+        {
+          p = 2 * p + 1;
+          o = 2 * o + 1;
+        }
+        order[dim] = o;
+      }
+    }
+    else if ( rule[dim] == 17 )  // CCN: linear, O = 2*L + 1
+    {
+      order[dim] = 2 * level[dim] + 1;
+    }
+    else
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_TO_ORDER_DEFAULT - Fatal error!\n";
+      std::cerr << "  Unexpected value of RULE["
+           << dim << "] = " << rule[dim] << ".\n";
+      std::exit ( 1 );
+    }
+  }
+  return;
+}
+//****************************************************************************80
+
+void level_to_order_exponential ( int dim_num, int level[], int rule[],
+  int order[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_TO_ORDER_EXPONENTIAL: exponential growth.
+//
+//  Discussion:
+//
+//    The user must preallocate space for the output array ORDER.
+//
+//    Closed rules:
+//
+//      O(0) = 1
+//      O(L) = 2^L + 1;
+//
+//      O = 1, 3, 5, 9, 17, 33, ...
+//
+//    Open rules:
+//
+//      O(L) = 2^(L+1) - 1;
+//
+//      O = 1, 3, 7, 15, 31, 63, ...
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int DIM_NUM, the spatial dimension.
+//
+//    Input, int LEVEL[DIM_NUM], the 1D levels.
+//
+//    Input, int RULE[DIM_NUM], the rule in each dimension.
+//     1, "CC",  Clenshaw Curtis, Closed Fully Nested rule.
+//     2, "F2",  Fejer Type 2, Open Fully Nested rule.
+//     3, "GP",  Gauss Patterson, Open Fully Nested rule.
+//     4, "GL",  Gauss Legendre, Open Weakly Nested rule.
+//     5, "GH",  Gauss Hermite, Open Weakly Nested rule.
+//     6, "GGH", Generalized Gauss Hermite, Open Weakly Nested rule.
+//     7, "LG",  Gauss Laguerre, Open Non Nested rule.
+//     8, "GLG", Generalized Gauss Laguerre, Open Non Nested rule.
+//     9, "GJ",  Gauss Jacobi, Open Non Nested rule.
+//    10, "GW",  Golub Welsch, (presumed) Open Non Nested rule.
+//    11, "CC_SE", Clenshaw Curtis Slow Exponential, Closed Fully Nested rule.
+//    12, "F2_SE", Fejer Type 2 Slow Exponential, Open Fully Nested rule.
+//    13, "GP_SE", Gauss Patterson Slow Exponential, Open Fully Nested rule.
+//    14, "CC_ME", Clenshaw Curtis Moderate Exponential, Closed Fully Nested rule.
+//    15, "F2_ME", Fejer Type 2 Moderate Exponential, Open Fully Nested rule.
+//    16, "GP_ME", Gauss Patterson Moderate Exponential, Open Fully Nested rule.
+//    17, "CCN", Clenshaw Curtis Nested, Linear, Closed Fully Nested rule.
+//
+//    Output, int ORDER[DIM_NUM], the 1D orders (number of points).
+//
+{
+  int dim;
+
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+    if ( level[dim] < 0 )
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_TO_ORDER_EXPONENTIAL - Fatal error!\n";
+      std::cerr << "  Negative value of LEVEL[DIM]!\n";
+      std::cerr << "  LEVEL[" << dim << "] = " << level[dim] << "\n";
+      std::exit ( 1 );
+    }
+    else if ( rule[dim] == 1 )  // CC: closed, O = 2^L + 1 (O = 1 at L = 0)
+    {
+      if ( level[dim] == 0 )
+      {
+        order[dim] = 1;
+      }
+      else
+      {
+        order[dim] = webbur::i4_power ( 2, level[dim] ) + 1;
+      }
+    }
+    else if ( rule[dim] == 2 )  // rules 2 through 10 are treated as open: O = 2^(L+1) - 1
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 3 )
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 4 )
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 5 )
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 6 )
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 7 )
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 8 )
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 9 )
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 10 )
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 11 )  // CC_SE: closed formula, same as rule 1
+    {
+      if ( level[dim] == 0 )
+      {
+        order[dim] = 1;
+      }
+      else
+      {
+        order[dim] = webbur::i4_power ( 2, level[dim] ) + 1;
+      }
+    }
+    else if ( rule[dim] == 12 )  // F2_SE: open formula
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 13 )  // GP_SE: open formula
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 14 )  // CC_ME: closed formula, same as rule 1
+    {
+      if ( level[dim] == 0 )
+      {
+        order[dim] = 1;
+      }
+      else
+      {
+        order[dim] = webbur::i4_power ( 2, level[dim] ) + 1;
+      }
+    }
+    else if ( rule[dim] == 15 )  // F2_ME: open formula
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 16 )  // GP_ME: open formula
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 ) - 1;
+    }
+    else if ( rule[dim] == 17 )  // CCN: O = 2^(L+1); NOTE(review): no "- 1" here, unlike rules 2-16 — confirm against upstream SANDIA_RULES
+    {
+      order[dim] = webbur::i4_power ( 2, level[dim] + 1 );
+    }
+    else
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_TO_ORDER_EXPONENTIAL - Fatal error!\n";
+      std::cerr << "  Unexpected value of RULE["
+           << dim << "] = " << rule[dim] << ".\n";
+      std::exit ( 1 );
+    }
+  }
+  return;
+}
+//****************************************************************************80
+
+void level_to_order_exponential_slow ( int dim_num, int level[], int rule[],
+  int order[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_TO_ORDER_EXPONENTIAL_SLOW: slow exponential growth;
+//
+//  Discussion:
+//
+//    We seek a sequence of quadrature rules with two opposing constraints:
+//    * a measured rise in polynomial precision with increasing level;
+//    * a control on the increase in (new) points per level;
+//
+//    Essentially, we are trying to keep some of the advantages of nesting,
+//    while moderating the cost of the explosive growth in order that occurs
+//    due to the repeated order doubling of nesting.
+//
+//    We wish the number of points at a given level L to be "about" 2 * L + 1,
+//    but we also wish the rules to be completely nested.
+//
+//    One way to do this is to start with a nested family of rules, whose
+//    order will tend to grow exponentially (doubling from one to the next),
+//    but simply to REPEAT each rule as many times as possible.  We move to
+//    the next rule only when the desired precision 2 * L + 1 exceeds the
+//    precision of the current rule.
+//
+//    For both the Clenshaw Curtis and Fejer Type 2 rules, the order and
+//    precision are the same if the order is odd.   That is, an 11 point rule
+//    will integrate exactly all polynomials up to and including degree 11.
+//
+//    For Gauss Patterson rules, the relationship between order and precision
+//    is somewhat more complicated.  For that rule, we take the philosophy
+//    that at each level L, we wish to choose the rule of smallest order
+//    so that the precision of 2 * L + 1 is guaranteed.
+//
+//     L    2*L+1  CC Order    F2 Order    GP Order/Precision
+//
+//     0        1         1           1        1/1
+//     1        3         3           3        3/5
+//     2        5         5           7        3/5
+//     3        7         9           7        7/11
+//     4        9         9          15        7/11
+//     5       11        17          15        7/11
+//     6       13        17          15       15/23
+//     7       15        17          15       15/23
+//     8       17        17          31       15/23
+//     9       19        33          31       15/23
+//    10       21        33          31       15/23
+//    11       23        33          31       15/23
+//    12       25        33          31       31/47
+//    13       27        33          31       31/47
+//    14       29        33          31       31/47
+//    15       31        33          31       31/47
+//    16       33        33          63       31/47
+//    17       35        65          63       31/47
+//    18       37        65          63       31/47
+//    19       39        65          63       31/47
+//    20       41        65          63       31/47
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Knut Petras,
+//    Smolyak Cubature of Given Polynomial Degree with Few Nodes
+//    for Increasing Dimension,
+//    Numerische Mathematik,
+//    Volume 93, Number 4, February 2003, pages 729-753.
+//
+//  Parameters:
+//
+//    Input, int DIM_NUM, the spatial dimension.
+//
+//    Input, int LEVEL[DIM_NUM], the 1D levels.
+//
+//    Input, int RULE[DIM_NUM], the rule in each dimension.
+//     1, "CC",  Clenshaw Curtis, Closed Fully Nested rule.
+//     2, "F2",  Fejer Type 2, Open Fully Nested rule.
+//     3, "GP",  Gauss Patterson, Open Fully Nested rule.
+//     4, "GL",  Gauss Legendre, Open Weakly Nested rule.
+//     5, "GH",  Gauss Hermite, Open Weakly Nested rule.
+//     6, "GGH", Generalized Gauss Hermite, Open Weakly Nested rule.
+//     7, "LG",  Gauss Laguerre, Open Non Nested rule.
+//     8, "GLG", Generalized Gauss Laguerre, Open Non Nested rule.
+//     9, "GJ",  Gauss Jacobi, Open Non Nested rule.
+//    10, "GW",  Golub Welsch, (presumed) Open Non Nested rule.
+//    11, "CC_SE", Clenshaw Curtis Slow Exponential, Closed Fully Nested rule.
+//    12, "F2_SE", Fejer Type 2 Slow Exponential, Open Fully Nested rule.
+//    13, "GP_SE", Gauss Patterson Slow Exponential, Open Fully Nested rule.
+//    14, "CC_ME", Clenshaw Curtis Moderate Exponential, Closed Fully Nested rule.
+//    15, "F2_ME", Fejer Type 2 Moderate Exponential, Open Fully Nested rule.
+//    16, "GP_ME", Gauss Patterson Moderate Exponential, Open Fully Nested rule.
+//    17, "CCN", Clenshaw Curtis Nested, Linear, Closed Fully Nested rule.
+//
+//    Output, int ORDER[DIM_NUM], the 1D orders (number of points).
+//
+{
+  int dim;
+  int o;  // candidate order for the current dimension
+  int p;  // polynomial precision tracker (Gauss Patterson branch only)
+
+  // Validate all levels up front, so no ORDER entry is written on bad input.
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+    if ( level[dim] < 0 )
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_TO_ORDER_EXPONENTIAL_SLOW - Fatal error!\n";
+      std::cerr << "  Negative value of LEVEL[DIM]!\n";
+      std::cerr << "  LEVEL[" << dim << "] = " << level[dim] << "\n";
+      std::exit ( 1 );
+    }
+  }
+
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+    if ( rule[dim] == 1 || rule[dim] == 11 || rule[dim] == 14 || rule[dim] == 17 )  // closed (CC-type) rules
+    {
+      if ( level[dim] == 0 )
+      {
+        o = 1;
+      }
+      else
+      {
+        o = 2;
+        while ( o < 2 * level[dim] + 1 )
+        {
+          o = 2 * ( o - 1 ) + 1;  // next nested closed order: 3, 5, 9, 17, ...
+        }
+      }
+    }
+    else if ( rule[dim] == 3 || rule[dim] == 13 || rule[dim] == 16 )  // Gauss Patterson rules: track precision p alongside order o
+    {
+      if ( level[dim] == 0 )
+      {
+        o = 1;
+      }
+      else
+      {
+        p = 5;
+        o = 3;
+        while ( p < 2 * level[dim] + 1 )
+        {
+          p = 2 * p + 1;
+          o = 2 * o + 1;
+        }
+      }
+    }
+    else  // all other (open) rules: smallest nested open order with O >= 2*L + 1
+    {
+      o = 1;
+      while ( o < 2 * level[dim] + 1 )
+      {
+        o = 2 * o + 1;  // 3, 7, 15, 31, ...
+      }
+    }
+    order[dim] = o;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void level_to_order_linear ( int dim_num, int level[], int rule[],
+  int order[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_TO_ORDER_LINEAR: linear growth.
+//
+//  Discussion:
+//
+//    The user must preallocate space for the output array ORDER.
+//
+//      O(L) = 2 * L + 1;
+//
+//      O = 1, 3, 5, 7, 9, ...
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    07 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int DIM_NUM, the spatial dimension.
+//
+//    Input, int LEVEL[DIM_NUM], the 1D levels.
+//
+//    Input, int RULE[DIM_NUM], the rule in each dimension.
+//     1, "CC",  Clenshaw Curtis, Closed Fully Nested rule.
+//     2, "F2",  Fejer Type 2, Open Fully Nested rule.
+//     3, "GP",  Gauss Patterson, Open Fully Nested rule.
+//     4, "GL",  Gauss Legendre, Open Weakly Nested rule.
+//     5, "GH",  Gauss Hermite, Open Weakly Nested rule.
+//     6, "GGH", Generalized Gauss Hermite, Open Weakly Nested rule.
+//     7, "LG",  Gauss Laguerre, Open Non Nested rule.
+//     8, "GLG", Generalized Gauss Laguerre, Open Non Nested rule.
+//     9, "GJ",  Gauss Jacobi, Open Non Nested rule.
+//    10, "GW",  Golub Welsch, (presumed) Open Non Nested rule.
+//    11, "CC_SE", Clenshaw Curtis Slow Exponential, Closed Fully Nested rule.
+//    12, "F2_SE", Fejer Type 2 Slow Exponential, Open Fully Nested rule.
+//    13, "GP_SE", Gauss Patterson Slow Exponential, Open Fully Nested rule.
+//    14, "CC_ME", Clenshaw Curtis Moderate Exponential, Closed Fully Nested rule.
+//    15, "F2_ME", Fejer Type 2 Moderate Exponential, Open Fully Nested rule.
+//    16, "GP_ME", Gauss Patterson Moderate Exponential, Open Fully Nested rule.
+//    17, "CCN", Clenshaw Curtis Nested, Linear, Closed Fully Nested rule.
+//
+//    Output, int ORDER[DIM_NUM], the 1D orders (number of points).
+//
+{
+  int dim;
+
+  // Validate all levels before writing any output; RULE is accepted but unused
+  // here since the linear formula is the same for every rule type.
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+    if ( level[dim] < 0 )
+    {
+      std::cerr << "\n";
+      std::cerr << "LEVEL_TO_ORDER_LINEAR - Fatal error!\n";
+      std::cerr << "  Negative value of LEVEL[DIM]!\n";
+      std::cerr << "  LEVEL[" << dim << "] = " << level[dim] << "\n";
+      std::exit ( 1 );
+    }
+  }
+
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+    order[dim] = 2 * level[dim] + 1;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+int level_to_order_exp_cc ( int level, int growth )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_TO_ORDER_EXP_CC is used for Clenshaw-Curtis type rules.
+//
+//  Discussion:
+//
+//    Rules of this type are assumed to be closed (including both endpoints
+//    except for the level 0 rule) and having a precision
+//    behavior typical of Clenshaw Curtis rules, namely, the ORDER-point
+//    rule is exact for polynomials of degree less than ORDER, and if
+//    ORDER is odd, then the exactness includes polynomials of degree ORDER
+//    as well.
+//
+//    LEVEL  ORDER  ORDER  ORDER
+//           G = 0  G = 1  G = 2
+//    -----  -----  -----  -----
+//        0      1      1      1
+//        1      3      5      3
+//        2      5      9      5
+//        3      9     17      9
+//        4      9     17     17
+//        5     17     33     33
+//        6     17     33     65
+//        7     17     33    129
+//        8     17     33    257
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    31 December 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int LEVEL, the level of the rule.
+//
+//    Input, int GROWTH, the growth policy:
+//    0, slow growth;
+//    1, moderate growth;
+//    2, full growth.
+//
+//    Output, int LEVEL_TO_ORDER_EXP_CC, the order of the rule.
+//
+{
+  int o;  // order to be returned
+//
+//  Slow exponential growth.
+//
+  if ( growth == 0 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      // Smallest nested closed order (3, 5, 9, 17, ...) with precision >= 2*LEVEL + 1.
+      o = 2;
+      while ( o < 2 * level + 1 )
+      {
+        o = 2 * ( o - 1 ) + 1;
+      }
+    }
+  }
+//
+//  Moderate Exponential Growth.
+//
+  else if ( growth == 1 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      // Same nested sequence, but precision target 4*LEVEL + 1.
+      o = 2;
+      while ( o < 4 * level + 1 )
+      {
+        o = 2 * ( o - 1 ) + 1;
+      }
+    }
+  }
+//
+//  Full Exponential Growth.
+//
+  else if ( growth == 2 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      o = webbur::i4_power ( 2, level ) + 1;  // O = 2^LEVEL + 1
+    }
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "LEVEL_TO_ORDER_EXP_CC - Fatal error!\n";
+    std::cerr << "  Illegal value of GROWTH = " << growth << "\n";
+    std::exit ( 1 );
+  }
+  return o;
+}
+//****************************************************************************80
+
+int level_to_order_exp_f2 ( int level, int growth )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_TO_ORDER_EXP_F2 is used for Fejer 2 type rules.
+//
+//  Discussion:
+//
+//    Rules of this type are assumed to be open (not including either endpoint)
+//    and having a precision behavior typical of Fejer Type 2
+//    rules, namely, the ORDER-point rule is exact for polynomials of degree
+//    less than ORDER, and if ORDER is odd, then the exactness includes
+//    polynomials of degree ORDER as well.
+//
+//    LEVEL  ORDER  ORDER  ORDER
+//           G = 0  G = 1  G = 2
+//
+//        0      1      1      1
+//        1      3      7      3
+//        2      7     15      7
+//        3      7     15     15
+//        4     15     31     31
+//        5     15     31     63
+//        6     15     31    127
+//        7     15     31    255
+//        8     31     63    511
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    31 December 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int LEVEL, the level of the rule.
+//
+//    Input, int GROWTH, the growth policy:
+//    0, slow growth;
+//    1, moderate growth;
+//    2, full growth.
+//
+//    Output, int LEVEL_TO_ORDER_EXP_F2, the order of the rule.
+//
+{
+  int o;  // order to be returned
+//
+//  Slow exponential growth.
+//
+  if ( growth == 0 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      // Smallest nested open order (3, 7, 15, ...) with precision >= 2*LEVEL + 1.
+      o = 1;
+      while ( o < 2 * level + 1 )
+      {
+        o = 2 * o + 1;
+      }
+    }
+  }
+//
+//  Moderate Exponential Growth.
+//
+  else if ( growth == 1 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      // Same nested sequence, but precision target 4*LEVEL + 1.
+      o = 1;
+      while ( o < 4 * level + 1 )
+      {
+        o = 2 * o + 1;
+      }
+    }
+  }
+//
+//  Full Exponential Growth.
+//
+  else if ( growth == 2 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      o = webbur::i4_power ( 2, level + 1 ) - 1;  // O = 2^(LEVEL+1) - 1
+    }
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "LEVEL_TO_ORDER_EXP_F2 - Fatal error!\n";
+    std::cerr << "  Illegal value of GROWTH = " << growth << "\n";
+    std::exit ( 1 );
+  }
+  return o;
+}
+//****************************************************************************80
+
+int level_to_order_exp_gauss ( int level, int growth )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_TO_ORDER_EXP_GAUSS is used for Gauss type rules.
+//
+//  Discussion:
+//
+//    Rules of this type are assumed to be open (not including either endpoint),
+//    and having a precision behavior typical of Gauss rules, namely, the
+//    ORDER-point rule is exact for polynomials of degree less than 2 * ORDER.
+//
+//    LEVEL  ORDER  ORDER  ORDER
+//           G = 0  G = 1  G = 2
+//
+//        0      1      1      1
+//        1      3      3      3
+//        2      3      7      7
+//        3      7      7     15
+//        4      7     15     31
+//        5      7     15     63
+//        6      7     15    127
+//        7     15     15    255
+//        8     15     31    511
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    31 December 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int LEVEL, the level of the rule.
+//
+//    Input, int GROWTH, the growth policy:
+//    0, slow growth;
+//    1, moderate growth;
+//    2, full growth.
+//
+//    Output, int LEVEL_TO_ORDER_EXP_GAUSS, the order of the rule.
+//
+{
+  int o;
+//
+//  Slow exponential growth.
+//
+  if ( growth == 0 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      o = 1;
+      while ( 2 * o - 1 < 2 * level + 1 )
+      {
+        o = 2 * o + 1;
+      }
+    }
+  }
+//
+//  Moderate Exponential Growth.
+//
+  else if ( growth == 1 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      o = 1;
+      while ( 2 * o - 1 < 4 * level + 1 )
+      {
+        o = 2 * o + 1;
+      }
+    }
+  }
+//
+//  Full Exponential Growth.
+//
+  else if ( growth == 2 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      o = webbur::i4_power ( 2, level + 1 ) - 1;
+    }
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "LEVEL_TO_ORDER_EXP_GAUSS - Fatal error!\n";
+    std::cerr << "  Illegal value of GROWTH = " << growth << "\n";
+    std::exit ( 1 );
+  }
+
+  return o;
+}
+//****************************************************************************80
+
+int level_to_order_exp_gp ( int level, int growth )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    LEVEL_TO_ORDER_EXP_GP is used for Gauss-Patterson type rules.
+//
+//  Discussion:
+//
+//    Rules of this type are assumed to be open (not including either endpoint)
+//    and having a precision behavior typical of Gauss Patterson rules.
+//
+//    Note that there are onlly 9 rules in the family, and so it is possible to
+//    specify input for which the function will fail.
+//
+//    LEVEL  ORDER  ORDER  ORDER
+//           G = 0  G = 1  G = 2
+//
+//        0      1      1      1
+//        1      3      3      3
+//        2      3      7      7
+//        3      7     15     15
+//        4      7     15     31
+//        5      7     15     63
+//        6     15     31    127
+//        7     15     31    255
+//        8     15     31    511
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    31 December 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int LEVEL, the level of the rule.
+//
+//    Input, int GROWTH, the growth policy:
+//    0, slow growth;
+//    1, moderate growth;
+//    2, full growth.
+//
+//    Output, int LEVEL_TO_ORDER_EXP_GP, the order of the rule.
+//
+{
+  int o;
+  int p;
+//
+//  Slow exponential growth.
+//
+  if ( growth == 0 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      p = 5;
+      o = 3;
+      while ( p < 2 * level + 1 )
+      {
+        p = 2 * p + 1;
+        o = 2 * o + 1;
+        if ( 511 < o )
+        {
+          std::cerr << "\n";
+          std::cerr << "LEVEL_TO_ORDER_EXP_GP - Fatal error!\n";
+          std::cerr << "  Request for unavailable Patterson rule.\n";
+          std::exit ( 1 );
+        }
+      }
+    }
+  }
+//
+//  Moderate Exponential Growth.
+//
+  else if ( growth == 1 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      p = 5;
+      o = 3;
+      while ( p < 4 * level + 1 )
+      {
+        p = 2 * p + 1;
+        o = 2 * o + 1;
+        if ( 511 < o )
+        {
+          std::cerr << "\n";
+          std::cerr << "LEVEL_TO_ORDER_EXP_GP - Fatal error!\n";
+          std::cerr << "  Request for unavailable Patterson rule.\n";
+          std::exit ( 1 );
+        }
+      }
+    }
+  }
+//
+//  Full Exponential Growth.
+//
+  else if ( growth == 2 )
+  {
+    if ( level == 0 )
+    {
+      o = 1;
+    }
+    else
+    {
+      o = webbur::i4_power ( 2, level + 1 ) - 1;
+      if ( 511 < o )
+      {
+        std::cerr << "\n";
+        std::cerr << "LEVEL_TO_ORDER_EXP_GP - Fatal error!\n";
+        std::cerr << "  Request for unavailable Patterson rule.\n";
+        std::exit ( 1 );
+      }
+    }
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "LEVEL_TO_ORDER_EXP_GP - Fatal error!\n";
+    std::cerr << "  Illegal value of GROWTH = " << growth << "\n";
+    std::exit ( 1 );
+  }
+
+  return o;
+}
+//****************************************************************************80
+
int level_to_order_exp_hgk ( int level, int growth )

//****************************************************************************80
//
//  Purpose:
//
//    LEVEL_TO_ORDER_EXP_HGK maps a level to a Hermite Genz-Keister order.
//
//  Discussion:
//
//    Rules of this type are open (neither endpoint is an abscissa) and show
//    the precision behavior typical of Hermite Genz-Keister rules.
//
//    Note that there are only 6 rules in the family, so it is possible to
//    specify input for which the function will fail.
//
//    LEVEL  ORDER  ORDER  ORDER
//           G = 0  G = 1  G = 2
//
//        0      1      1      1
//        1      3      3      3
//        2      3      9      9
//        3      9      9     19
//        4      9     19     35
//        5      9     19     43
//        6      9     19     --
//        7      9     19     --
//        8     19     35     --
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    31 December 2011
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int LEVEL, the level of the rule.
//
//    Input, int GROWTH, the growth policy:
//    0, slow growth;
//    1, moderate growth;
//    2, full growth.
//
//    Output, int LEVEL_TO_ORDER_EXP_HGK, the order of the rule.
//
{
//
//  The 6 family members: orders and the polynomial precision of each.
//
  static int o_hgk[6] = { 1, 3, 9, 19, 35, 43 };
  static int p_hgk[6] = { 1, 5, 15, 29, 51, 67 };
  int index;
  int order;
  int precision;
  int target;
//
//  Slow growth requires precision 2*LEVEL+1; moderate growth 4*LEVEL+1.
//  Walk up the family until the precision goal is met.
//
  if ( growth == 0 || growth == 1 )
  {
    target = ( growth == 0 ? 2 : 4 ) * level + 1;
    index = 0;
    precision = p_hgk[0];
    order = o_hgk[0];
    while ( precision < target )
    {
      index = index + 1;
      if ( 5 < index )
      {
        std::cerr << "\n";
        std::cerr << "LEVEL_TO_ORDER_EXP_HGK - Fatal error!\n";
        std::cerr << "  Hermite Genz-Keister maximum level exceeded.\n";
        std::exit ( 1 );
      }
      precision = p_hgk[index];
      order = o_hgk[index];
    }
  }
//
//  Full growth: LEVEL indexes the family directly (clamped below at 0).
//
  else if ( growth == 2 )
  {
    index = ( level < 0 ) ? 0 : level;
    if ( 5 < index )
    {
      std::cerr << "\n";
      std::cerr << "LEVEL_TO_ORDER_EXP_HGK - Fatal error!\n";
      std::cerr << "  Hermite Genz-Keister maximum level exceeded.\n";
      std::exit ( 1 );
    }
    order = o_hgk[index];
  }
  else
  {
    std::cerr << "\n";
    std::cerr << "LEVEL_TO_ORDER_EXP_HGK - Fatal error!\n";
    std::cerr << "  Illegal value of GROWTH = " << growth << "\n";
    std::exit ( 1 );
  }

  return order;
}
+//****************************************************************************80
+
int level_to_order_linear_nn ( int level, int growth )

//****************************************************************************80
//
//  Purpose:
//
//    LEVEL_TO_ORDER_LINEAR_NN is used for non-nested Gauss type rules.
//
//  Discussion:
//
//    Rules of this type are open (neither endpoint is an abscissa),
//    non-nested, and show the precision behavior typical of Gauss rules.
//
//    LEVEL  ORDER  ORDER
//           G = 0  G = 1
//
//        0      1      1
//        1      2      3
//        2      3      5
//        3      4      7
//        4      5      9
//        5      6     11
//        6      7     13
//        7      8     15
//        8      9     17
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    31 December 2011
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int LEVEL, the level of the rule.
//
//    Input, int GROWTH, the growth policy:
//    0, slow growth;
//    1, moderate growth;
//    2, full growth (treated the same as moderate growth here).
//
//    Output, int LEVEL_TO_ORDER_LINEAR_NN, the order of the rule.
//
{
  int order;

  switch ( growth )
  {
//
//  Slow linear growth.
//
    case 0:
      order = level + 1;
      break;
//
//  Moderate linear growth; full growth yields the same order.
//
    case 1:
    case 2:
      order = 2 * level + 1;
      break;

    default:
      std::cerr << "\n";
      std::cerr << "LEVEL_TO_ORDER_LINEAR_NN - Fatal error!\n";
      std::cerr << "  Illegal value of GROWTH = " << growth << "\n";
      std::exit ( 1 );
  }

  return order;
}
+//****************************************************************************80
+
int level_to_order_linear_wn ( int level, int growth )

//****************************************************************************80
//
//  Purpose:
//
//    LEVEL_TO_ORDER_LINEAR_WN is used for weakly-nested Gauss type rules.
//
//  Discussion:
//
//    Rules of this type are open (neither endpoint is an abscissa),
//    nested, and show the precision behavior typical of Gauss rules.
//
//    The rules are assumed to be generated with an odd number of points,
//    all sharing the single common point 0.
//
//    Note that the "moderate growth" option of this function produces the
//    same values as the moderate growth option of LEVEL_TO_ORDER_LINEAR_NN.
//
//    LEVEL  ORDER  ORDER
//           G = 0  G = 1
//
//        0      1      1
//        1      3      3
//        2      3      5
//        3      5      7
//        4      5      9
//        5      7     11
//        6      7     13
//        7      9     15
//        8      9     17
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    31 December 2011
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int LEVEL, the level of the rule.
//
//    Input, int GROWTH, the growth policy:
//    0, slow growth;
//    1, moderate growth;
//    2, full growth (treated the same as moderate growth here).
//
//    Output, int LEVEL_TO_ORDER_LINEAR_WN, the order of the rule.
//
{
  int order;

  switch ( growth )
  {
//
//  Slow growth: round LEVEL+1 down to an even value, then add 1 so the
//  order is always odd.
//
    case 0:
      order = 2 * ( ( level + 1 ) / 2 ) + 1;
      break;
//
//  Moderate growth; full growth yields the same order.
//
    case 1:
    case 2:
      order = 2 * level + 1;
      break;

    default:
      std::cerr << "\n";
      std::cerr << "LEVEL_TO_ORDER_LINEAR_WN - Fatal error!\n";
      std::cerr << "  Illegal value of GROWTH = " << growth << "\n";
      std::exit ( 1 );
  }

  return order;
}
+//****************************************************************************80
+
void nc_compute ( int n, double x_min, double x_max, double x[], double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    NC_COMPUTE computes a Newton-Cotes quadrature rule.
//
//  Discussion:
//
//    For the interval [X_MIN,X_MAX], the Newton-Cotes quadrature rule
//    estimates
//
//      Integral ( X_MIN <= X <= X_MAX ) F(X) dX
//
//    using N abscissas X and weights W:
//
//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) ).
//
//    For the CLOSED rule, the abscissas include the end points.
//    For the OPEN rule, the abscissas do not include the end points.
//
//    Each weight W(I) is the exact integral over [X_MIN,X_MAX] of the I-th
//    Lagrange basis polynomial associated with the abscissas X.  The
//    abscissas are assumed to be distinct; repeated abscissas would cause
//    a division by zero in the divided-difference step below.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    17 November 2009
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order.
//
//    Input, double X_MIN, X_MAX, the endpoints of the interval.
//
//    Input, double X[N], the abscissas.
//
//    Output, double W[N], the weights.
//
{
  double *d;     // workspace: coefficients of the current basis polynomial
  int i;
  int j;
  int k;
  double yvala;  // antiderivative value at X_MIN
  double yvalb;  // antiderivative value at X_MAX

  d = new double[n];

  for ( i = 0; i < n; i++ )
  {
//
//  Compute the Lagrange basis polynomial which is 1 at XTAB(I),
//  and zero at the other nodes.
//
    for ( j = 0; j < n; j++ )
    {
      d[j] = 0.0;
    }
    d[i] = 1.0;
//
//  Divided-difference sweep: build the polynomial in Newton form.
//  Divides by differences of abscissas, hence the distinctness assumption.
//
    for ( j = 2; j <= n; j++ )
    {
      for ( k = j; k <= n; k++ )
      {
        d[n+j-k-1] = ( d[n+j-k-1-1] - d[n+j-k-1] ) / ( x[n+1-k-1] - x[n+j-k-1] );
      }
    }
//
//  Convert the Newton-form coefficients to power (monomial) form in D.
//
    for ( j = 1; j <= n - 1; j++ )
    {
      for ( k = 1; k <= n - j; k++ )
      {
        d[n-k-1] = d[n-k-1] - x[n-k-j] * d[n-k];
      }
    }
//
//  Evaluate the antiderivative of the polynomial at the left and
//  right endpoints.  Each D[J]/(J+1) is an integrated coefficient; the
//  evaluation uses Horner's rule, with a final multiply supplying the
//  overall factor of the evaluation point.
//
    yvala = d[n-1] / ( double ) ( n );
    for ( j = n - 2; 0 <= j; j-- )
    {
      yvala = yvala * x_min + d[j] / ( double ) ( j + 1 );
    }
    yvala = yvala * x_min;

    yvalb = d[n-1] / ( double ) ( n );
    for ( j = n - 2; 0 <= j; j-- )
    {
      yvalb = yvalb * x_max + d[j] / ( double ) ( j + 1 );
    }
    yvalb = yvalb * x_max;
//
//  The weight is the definite integral of the basis polynomial.
//
    w[i] = yvalb - yvala;
  }

  delete [] d;

  return;
}
+//****************************************************************************80
+
double *nc_compute_new ( int n, double x_min, double x_max, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    NC_COMPUTE_NEW computes a Newton-Cotes quadrature rule.
//
//  Discussion:
//
//    For the interval [X_MIN,X_MAX], the Newton-Cotes quadrature rule
//    estimates
//
//      Integral ( X_MIN <= X <= X_MAX ) F(X) dX
//
//    using N abscissas X and weights W:
//
//      Sum ( 1 <= I <= N ) W(I) * F ( X(I) ).
//
//    For the CLOSED rule, the abscissas include the end points.
//    For the OPEN rule, the abscissas do not include the end points.
//
//    This is the same computation as NC_COMPUTE, except that the weight
//    array is allocated here and returned; the caller is responsible for
//    freeing it with delete [].  The abscissas are assumed to be distinct;
//    repeated abscissas would cause a division by zero below.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    17 November 2009
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order.
//
//    Input, double X_MIN, X_MAX, the endpoints of the interval.
//
//    Input, double X[N], the abscissas.
//
//    Output, double NC_COMPUTE_NEW[N], the weights (caller must delete []).
//
{
  double *d;     // workspace: coefficients of the current basis polynomial
  int i;
  int j;
  int k;
  double *w;     // newly allocated weight array, returned to the caller
  double yvala;  // antiderivative value at X_MIN
  double yvalb;  // antiderivative value at X_MAX

  d = new double[n];
  w = new double[n];

  for ( i = 0; i < n; i++ )
  {
//
//  Compute the Lagrange basis polynomial which is 1 at XTAB(I),
//  and zero at the other nodes.
//
    for ( j = 0; j < n; j++ )
    {
      d[j] = 0.0;
    }
    d[i] = 1.0;
//
//  Divided-difference sweep: build the polynomial in Newton form.
//
    for ( j = 2; j <= n; j++ )
    {
      for ( k = j; k <= n; k++ )
      {
        d[n+j-k-1] = ( d[n+j-k-1-1] - d[n+j-k-1] ) / ( x[n+1-k-1] - x[n+j-k-1] );
      }
    }
//
//  Convert the Newton-form coefficients to power (monomial) form in D.
//
    for ( j = 1; j <= n - 1; j++ )
    {
      for ( k = 1; k <= n - j; k++ )
      {
        d[n-k-1] = d[n-k-1] - x[n-k-j] * d[n-k];
      }
    }
//
//  Evaluate the antiderivative of the polynomial at the left and
//  right endpoints, via Horner's rule on the integrated coefficients.
//
    yvala = d[n-1] / ( double ) ( n );
    for ( j = n - 2; 0 <= j; j-- )
    {
      yvala = yvala * x_min + d[j] / ( double ) ( j + 1 );
    }
    yvala = yvala * x_min;

    yvalb = d[n-1] / ( double ) ( n );
    for ( j = n - 2; 0 <= j; j-- )
    {
      yvalb = yvalb * x_max + d[j] / ( double ) ( j + 1 );
    }
    yvalb = yvalb * x_max;
//
//  The weight is the definite integral of the basis polynomial.
//
    w[i] = yvalb - yvala;
  }

  delete [] d;

  return w;
}
+//****************************************************************************80
+
void ncc_compute_points ( int n, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    NCC_COMPUTE_POINTS: points of a Newton-Cotes Closed quadrature rule.
//
//  Discussion:
//
//    The N abscissas are equally spaced over [-1,+1], and include both
//    endpoints.  The degenerate case N = 1 uses the interval midpoint.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    16 November 2009
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order.
//
//    Output, double X[N], the abscissas.
//
{
  const double x_max = 1.0;
  const double x_min = -1.0;
  int i;

  if ( n == 1 )
  {
    x[0] = ( x_max + x_min ) / 2.0;
    return;
  }
//
//  Linear interpolation between the endpoints, X[0] = X_MIN, X[N-1] = X_MAX.
//
  for ( i = 0; i < n; i++ )
  {
    x[i] = ( ( double ) ( n - 1 - i ) * x_min
           + ( double ) ( i         ) * x_max )
           / ( double ) ( n - 1     );
  }
}
+//****************************************************************************80
+
+void ncc_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    NCC_COMPUTE_WEIGHTS: weights of a Newton-Cotes Closed quadrature rule.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    16 November 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double W[N], the weights.
+//
+{
+  int i;
+  double *x;
+  double x_max = 1.0;
+  double x_min = -1.0;
+
+  if ( n == 1 )
+  {
+    w[0] = x_max - x_min;
+  }
+  else
+  {
+    x = new double[n];
+
+    for ( i = 0; i < n; i++ )
+    {
+      x[i] = ( ( double ) ( n - i - 1 ) * x_min
+             + ( double ) (     i     ) * x_max )
+             / ( double ) ( n     - 1 );
+    }
+    webbur::nc_compute ( n, x_min, x_max, x, w );
+
+    delete [] x;
+  }
+  return;
+}
+//****************************************************************************80
+
void nco_compute_points ( int n, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    NCO_COMPUTE_POINTS: points for a Newton-Cotes Open quadrature rule.
//
//  Discussion:
//
//    The N abscissas are equally spaced inside [-1,+1]; neither endpoint
//    is included.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    17 November 2009
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order.
//
//    Output, double X[N], the abscissas.
//
{
  const double x_max = 1.0;
  const double x_min = -1.0;
  int i;
//
//  The points divide [-1,+1] into N+1 equal parts, skipping the endpoints.
//
  for ( i = 0; i < n; i++ )
  {
    x[i] = ( ( double ) ( n - i     ) * x_min
           + ( double ) (   + i + 1 ) * x_max )
           / ( double ) ( n     + 1 );
  }
}
+//****************************************************************************80
+
+void nco_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    NCO_COMPUTE_WEIGHTS: weights for a Newton-Cotes Open quadrature rule.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    17 November 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double W[N], the weights.
+//
+{
+  int i;
+  double *x;
+  double x_max = 1.0;
+  double x_min = -1.0;
+
+  x = new double[n];
+
+  webbur::nco_compute_points ( n, x );
+
+  webbur::nc_compute ( n, x_min, x_max, x, w );
+
+  delete [] x;
+
+  return;
+}
+//****************************************************************************80
+
void ncoh_compute_points ( int n, double x[] )

//****************************************************************************80
//
//  Purpose:
//
//    NCOH_COMPUTE_POINTS computes points for a Newton-Cotes "open half" quadrature rule.
//
//  Discussion:
//
//    The interval [-1,+1] is divided into N equal subintervals, and the
//    I-th abscissa is the midpoint of the I-th subinterval.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    03 July 2011
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order.
//
//    Output, double X[N], the abscissas.
//
{
  const double x_max = 1.0;
  const double x_min = -1.0;
  int i;
//
//  Midpoint of subinterval I: weights 2N-2I-1 and 2I+1 on the endpoints.
//
  for ( i = 0; i < n; i++ )
  {
    x[i] = ( ( double ) ( 2 * ( n - i ) - 1 ) * x_min
           + ( double ) ( 2 * i + 1         ) * x_max )
           / ( double ) ( 2 * n             );
  }
}
+//****************************************************************************80
+
+void ncoh_compute_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    NCOH_COMPUTE_WEIGHTS computes weights for a Newton-Cotes "open half" quadrature rule.
+//
+//  Discussion:
+//
+//    The input value N is used to define N equal subintervals of [-1,+1].
+//    The I-th abscissa is the center of the I-th subinterval.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    03 July 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//
+//    Output, double W[N], the weights.
+//
+{
+  int i;
+  double *x;
+  const double x_max = 1.0;
+  const double x_min = -1.0;
+
+  x = new double[n];
+
+  webbur::ncoh_compute_points ( n, x );
+
+  webbur::nc_compute ( n, x_min, x_max, x, w );
+
+  delete [] x;
+
+  return;
+}
+//****************************************************************************80
+
+void patterson_lookup ( int n, double x[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    PATTERSON_LOOKUP looks up Patterson quadrature points and weights.
+//
+//  Discussion:
+//
+//    Our convention is that the abscissas are numbered from left to right.
+//
+//    The rule is defined on [-1,1],
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    11 February 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Prem Kythe, Michael Schaeferkotter,
+//    Handbook of Computational Methods for Integration,
+//    Chapman and Hall, 2004,
+//    ISBN: 1-58488-428-2,
+//    LC: QA299.3.K98.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    Legal values are 1, 3, 7, 15, 31, 63, 127, 255 and 511.
+//
+//    Output, double X[N], the abscissas.
+//
+//    Output, double W[N], the weights.
+//
+{
+  patterson_lookup_points ( n, x );
+  patterson_lookup_weights ( n, w );
+
+  return;
+}
+//****************************************************************************80
+
+void patterson_lookup_points ( int n, double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    PATTERSON_LOOKUP_POINTS looks up Patterson quadrature points.
+//
+//  Discussion:
+//
+//    Our convention is that the abscissas are numbered from left to right.
+//
+//    The rule is defined on [-1,1],
+//
+//    These rules constitute a nested family.  The rules can integrate exactly
+//    any polynomial of degree 1, 5, 11, 23, 47, 95, 191, 383 or 767,
+//    respectively.
+//
+//    The data for N = 511 was supplied by Dirk Laurie, and is derived
+//    from a NAG Library function d01arf.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    14 September 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Prem Kythe, Michael Schaeferkotter,
+//    Handbook of Computational Methods for Integration,
+//    Chapman and Hall, 2004,
+//    ISBN: 1-58488-428-2,
+//    LC: QA299.3.K98.
+//
+//    NAG Library Documentation,
+//    D01ARF,
+//    The Numerical Algorithms Group.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    Legal values are 1, 3, 7, 15, 31, 63, 127, 255 and 511.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  static double x_001[1] =
+  {
+     0.0
+  };
+  static double x_003[3] =
+  {
+    -0.77459666924148337704,
+     0.0,
+     0.77459666924148337704
+  };
+  static double x_007[7] =
+  {
+    -0.96049126870802028342,
+    -0.77459666924148337704,
+    -0.43424374934680255800,
+     0.0,
+     0.43424374934680255800,
+     0.77459666924148337704,
+     0.96049126870802028342
+  };
+  static double x_015[15] =
+  {
+    -0.99383196321275502221,
+    -0.96049126870802028342,
+    -0.88845923287225699889,
+    -0.77459666924148337704,
+    -0.62110294673722640294,
+    -0.43424374934680255800,
+    -0.22338668642896688163,
+     0.0,
+     0.22338668642896688163,
+     0.43424374934680255800,
+     0.62110294673722640294,
+     0.77459666924148337704,
+     0.88845923287225699889,
+     0.96049126870802028342,
+     0.99383196321275502221
+  };
+  static double x_031[31] =
+  {
+    -0.99909812496766759766,
+    -0.99383196321275502221,
+    -0.98153114955374010687,
+    -0.96049126870802028342,
+    -0.92965485742974005667,
+    -0.88845923287225699889,
+    -0.83672593816886873550,
+    -0.77459666924148337704,
+    -0.70249620649152707861,
+    -0.62110294673722640294,
+    -0.53131974364437562397,
+    -0.43424374934680255800,
+    -0.33113539325797683309,
+    -0.22338668642896688163,
+    -0.11248894313318662575,
+     0.0,
+     0.11248894313318662575,
+     0.22338668642896688163,
+     0.33113539325797683309,
+     0.43424374934680255800,
+     0.53131974364437562397,
+     0.62110294673722640294,
+     0.70249620649152707861,
+     0.77459666924148337704,
+     0.83672593816886873550,
+     0.88845923287225699889,
+     0.92965485742974005667,
+     0.96049126870802028342,
+     0.98153114955374010687,
+     0.99383196321275502221,
+     0.99909812496766759766
+  };
+  static double x_063[63] =
+  {
+    -0.99987288812035761194,
+    -0.99909812496766759766,
+    -0.99720625937222195908,
+    -0.99383196321275502221,
+    -0.98868475754742947994,
+    -0.98153114955374010687,
+    -0.97218287474858179658,
+    -0.96049126870802028342,
+    -0.94634285837340290515,
+    -0.92965485742974005667,
+    -0.91037115695700429250,
+    -0.88845923287225699889,
+    -0.86390793819369047715,
+    -0.83672593816886873550,
+    -0.80694053195021761186,
+    -0.77459666924148337704,
+    -0.73975604435269475868,
+    -0.70249620649152707861,
+    -0.66290966002478059546,
+    -0.62110294673722640294,
+    -0.57719571005204581484,
+    -0.53131974364437562397,
+    -0.48361802694584102756,
+    -0.43424374934680255800,
+    -0.38335932419873034692,
+    -0.33113539325797683309,
+    -0.27774982202182431507,
+    -0.22338668642896688163,
+    -0.16823525155220746498,
+    -0.11248894313318662575,
+    -0.056344313046592789972,
+     0.0,
+     0.056344313046592789972,
+     0.11248894313318662575,
+     0.16823525155220746498,
+     0.22338668642896688163,
+     0.27774982202182431507,
+     0.33113539325797683309,
+     0.38335932419873034692,
+     0.43424374934680255800,
+     0.48361802694584102756,
+     0.53131974364437562397,
+     0.57719571005204581484,
+     0.62110294673722640294,
+     0.66290966002478059546,
+     0.70249620649152707861,
+     0.73975604435269475868,
+     0.77459666924148337704,
+     0.80694053195021761186,
+     0.83672593816886873550,
+     0.86390793819369047715,
+     0.88845923287225699889,
+     0.91037115695700429250,
+     0.92965485742974005667,
+     0.94634285837340290515,
+     0.96049126870802028342,
+     0.97218287474858179658,
+     0.98153114955374010687,
+     0.98868475754742947994,
+     0.99383196321275502221,
+     0.99720625937222195908,
+     0.99909812496766759766,
+     0.99987288812035761194
+  };
+  static double x_127[127] =
+  {
+    -0.99998243035489159858,
+    -0.99987288812035761194,
+    -0.99959879967191068325,
+    -0.99909812496766759766,
+    -0.99831663531840739253,
+    -0.99720625937222195908,
+    -0.99572410469840718851,
+    -0.99383196321275502221,
+    -0.99149572117810613240,
+    -0.98868475754742947994,
+    -0.98537149959852037111,
+    -0.98153114955374010687,
+    -0.97714151463970571416,
+    -0.97218287474858179658,
+    -0.96663785155841656709,
+    -0.96049126870802028342,
+    -0.95373000642576113641,
+    -0.94634285837340290515,
+    -0.93832039777959288365,
+    -0.92965485742974005667,
+    -0.92034002547001242073,
+    -0.91037115695700429250,
+    -0.89974489977694003664,
+    -0.88845923287225699889,
+    -0.87651341448470526974,
+    -0.86390793819369047715,
+    -0.85064449476835027976,
+    -0.83672593816886873550,
+    -0.82215625436498040737,
+    -0.80694053195021761186,
+    -0.79108493379984836143,
+    -0.77459666924148337704,
+    -0.75748396638051363793,
+    -0.73975604435269475868,
+    -0.72142308537009891548,
+    -0.70249620649152707861,
+    -0.68298743109107922809,
+    -0.66290966002478059546,
+    -0.64227664250975951377,
+    -0.62110294673722640294,
+    -0.59940393024224289297,
+    -0.57719571005204581484,
+    -0.55449513263193254887,
+    -0.53131974364437562397,
+    -0.50768775753371660215,
+    -0.48361802694584102756,
+    -0.45913001198983233287,
+    -0.43424374934680255800,
+    -0.40897982122988867241,
+    -0.38335932419873034692,
+    -0.35740383783153215238,
+    -0.33113539325797683309,
+    -0.30457644155671404334,
+    -0.27774982202182431507,
+    -0.25067873030348317661,
+    -0.22338668642896688163,
+    -0.19589750271110015392,
+    -0.16823525155220746498,
+    -0.14042423315256017459,
+    -0.11248894313318662575,
+    -0.084454040083710883710,
+    -0.056344313046592789972,
+    -0.028184648949745694339,
+     0.0,
+     0.028184648949745694339,
+     0.056344313046592789972,
+     0.084454040083710883710,
+     0.11248894313318662575,
+     0.14042423315256017459,
+     0.16823525155220746498,
+     0.19589750271110015392,
+     0.22338668642896688163,
+     0.25067873030348317661,
+     0.27774982202182431507,
+     0.30457644155671404334,
+     0.33113539325797683309,
+     0.35740383783153215238,
+     0.38335932419873034692,
+     0.40897982122988867241,
+     0.43424374934680255800,
+     0.45913001198983233287,
+     0.48361802694584102756,
+     0.50768775753371660215,
+     0.53131974364437562397,
+     0.55449513263193254887,
+     0.57719571005204581484,
+     0.59940393024224289297,
+     0.62110294673722640294,
+     0.64227664250975951377,
+     0.66290966002478059546,
+     0.68298743109107922809,
+     0.70249620649152707861,
+     0.72142308537009891548,
+     0.73975604435269475868,
+     0.75748396638051363793,
+     0.77459666924148337704,
+     0.79108493379984836143,
+     0.80694053195021761186,
+     0.82215625436498040737,
+     0.83672593816886873550,
+     0.85064449476835027976,
+     0.86390793819369047715,
+     0.87651341448470526974,
+     0.88845923287225699889,
+     0.89974489977694003664,
+     0.91037115695700429250,
+     0.92034002547001242073,
+     0.92965485742974005667,
+     0.93832039777959288365,
+     0.94634285837340290515,
+     0.95373000642576113641,
+     0.96049126870802028342,
+     0.96663785155841656709,
+     0.97218287474858179658,
+     0.97714151463970571416,
+     0.98153114955374010687,
+     0.98537149959852037111,
+     0.98868475754742947994,
+     0.99149572117810613240,
+     0.99383196321275502221,
+     0.99572410469840718851,
+     0.99720625937222195908,
+     0.99831663531840739253,
+     0.99909812496766759766,
+     0.99959879967191068325,
+     0.99987288812035761194,
+     0.99998243035489159858
+  };
+  static double x_255[255] =
+  {
+    -0.99999759637974846462,
+    -0.99998243035489159858,
+    -0.99994399620705437576,
+    -0.99987288812035761194,
+    -0.99976049092443204733,
+    -0.99959879967191068325,
+    -0.99938033802502358193,
+    -0.99909812496766759766,
+    -0.99874561446809511470,
+    -0.99831663531840739253,
+    -0.99780535449595727456,
+    -0.99720625937222195908,
+    -0.99651414591489027385,
+    -0.99572410469840718851,
+    -0.99483150280062100052,
+    -0.99383196321275502221,
+    -0.99272134428278861533,
+    -0.99149572117810613240,
+    -0.99015137040077015918,
+    -0.98868475754742947994,
+    -0.98709252795403406719,
+    -0.98537149959852037111,
+    -0.98351865757863272876,
+    -0.98153114955374010687,
+    -0.97940628167086268381,
+    -0.97714151463970571416,
+    -0.97473445975240266776,
+    -0.97218287474858179658,
+    -0.96948465950245923177,
+    -0.96663785155841656709,
+    -0.96364062156981213252,
+    -0.96049126870802028342,
+    -0.95718821610986096274,
+    -0.95373000642576113641,
+    -0.95011529752129487656,
+    -0.94634285837340290515,
+    -0.94241156519108305981,
+    -0.93832039777959288365,
+    -0.93406843615772578800,
+    -0.92965485742974005667,
+    -0.92507893290707565236,
+    -0.92034002547001242073,
+    -0.91543758715576504064,
+    -0.91037115695700429250,
+    -0.90514035881326159519,
+    -0.89974489977694003664,
+    -0.89418456833555902286,
+    -0.88845923287225699889,
+    -0.88256884024734190684,
+    -0.87651341448470526974,
+    -0.87029305554811390585,
+    -0.86390793819369047715,
+    -0.85735831088623215653,
+    -0.85064449476835027976,
+    -0.84376688267270860104,
+    -0.83672593816886873550,
+    -0.82952219463740140018,
+    -0.82215625436498040737,
+    -0.81462878765513741344,
+    -0.80694053195021761186,
+    -0.79909229096084140180,
+    -0.79108493379984836143,
+    -0.78291939411828301639,
+    -0.77459666924148337704,
+    -0.76611781930376009072,
+    -0.75748396638051363793,
+    -0.74869629361693660282,
+    -0.73975604435269475868,
+    -0.73066452124218126133,
+    -0.72142308537009891548,
+    -0.71203315536225203459,
+    -0.70249620649152707861,
+    -0.69281376977911470289,
+    -0.68298743109107922809,
+    -0.67301883023041847920,
+    -0.66290966002478059546,
+    -0.65266166541001749610,
+    -0.64227664250975951377,
+    -0.63175643771119423041,
+    -0.62110294673722640294,
+    -0.61031811371518640016,
+    -0.59940393024224289297,
+    -0.58836243444766254143,
+    -0.57719571005204581484,
+    -0.56590588542365442262,
+    -0.55449513263193254887,
+    -0.54296566649831149049,
+    -0.53131974364437562397,
+    -0.51955966153745702199,
+    -0.50768775753371660215,
+    -0.49570640791876146017,
+    -0.48361802694584102756,
+    -0.47142506587165887693,
+    -0.45913001198983233287,
+    -0.44673538766202847374,
+    -0.43424374934680255800,
+    -0.42165768662616330006,
+    -0.40897982122988867241,
+    -0.39621280605761593918,
+    -0.38335932419873034692,
+    -0.37042208795007823014,
+    -0.35740383783153215238,
+    -0.34430734159943802278,
+    -0.33113539325797683309,
+    -0.31789081206847668318,
+    -0.30457644155671404334,
+    -0.29119514851824668196,
+    -0.27774982202182431507,
+    -0.26424337241092676194,
+    -0.25067873030348317661,
+    -0.23705884558982972721,
+    -0.22338668642896688163,
+    -0.20966523824318119477,
+    -0.19589750271110015392,
+    -0.18208649675925219825,
+    -0.16823525155220746498,
+    -0.15434681148137810869,
+    -0.14042423315256017459,
+    -0.12647058437230196685,
+    -0.11248894313318662575,
+    -0.098482396598119202090,
+    -0.084454040083710883710,
+    -0.070406976042855179063,
+    -0.056344313046592789972,
+    -0.042269164765363603212,
+    -0.028184648949745694339,
+    -0.014093886410782462614,
+    0.0,
+    0.014093886410782462614,
+    0.028184648949745694339,
+    0.042269164765363603212,
+    0.056344313046592789972,
+    0.070406976042855179063,
+    0.084454040083710883710,
+    0.098482396598119202090,
+    0.11248894313318662575,
+    0.12647058437230196685,
+    0.14042423315256017459,
+    0.15434681148137810869,
+    0.16823525155220746498,
+    0.18208649675925219825,
+    0.19589750271110015392,
+    0.20966523824318119477,
+    0.22338668642896688163,
+    0.23705884558982972721,
+    0.25067873030348317661,
+    0.26424337241092676194,
+    0.27774982202182431507,
+    0.29119514851824668196,
+    0.30457644155671404334,
+    0.31789081206847668318,
+    0.33113539325797683309,
+    0.34430734159943802278,
+    0.35740383783153215238,
+    0.37042208795007823014,
+    0.38335932419873034692,
+    0.39621280605761593918,
+    0.40897982122988867241,
+    0.42165768662616330006,
+    0.43424374934680255800,
+    0.44673538766202847374,
+    0.45913001198983233287,
+    0.47142506587165887693,
+    0.48361802694584102756,
+    0.49570640791876146017,
+    0.50768775753371660215,
+    0.51955966153745702199,
+    0.53131974364437562397,
+    0.54296566649831149049,
+    0.55449513263193254887,
+    0.56590588542365442262,
+    0.57719571005204581484,
+    0.58836243444766254143,
+    0.59940393024224289297,
+    0.61031811371518640016,
+    0.62110294673722640294,
+    0.63175643771119423041,
+    0.64227664250975951377,
+    0.65266166541001749610,
+    0.66290966002478059546,
+    0.67301883023041847920,
+    0.68298743109107922809,
+    0.69281376977911470289,
+    0.70249620649152707861,
+    0.71203315536225203459,
+    0.72142308537009891548,
+    0.73066452124218126133,
+    0.73975604435269475868,
+    0.74869629361693660282,
+    0.75748396638051363793,
+    0.76611781930376009072,
+    0.77459666924148337704,
+    0.78291939411828301639,
+    0.79108493379984836143,
+    0.79909229096084140180,
+    0.80694053195021761186,
+    0.81462878765513741344,
+    0.82215625436498040737,
+    0.82952219463740140018,
+    0.83672593816886873550,
+    0.84376688267270860104,
+    0.85064449476835027976,
+    0.85735831088623215653,
+    0.86390793819369047715,
+    0.87029305554811390585,
+    0.87651341448470526974,
+    0.88256884024734190684,
+    0.88845923287225699889,
+    0.89418456833555902286,
+    0.89974489977694003664,
+    0.90514035881326159519,
+    0.91037115695700429250,
+    0.91543758715576504064,
+    0.92034002547001242073,
+    0.92507893290707565236,
+    0.92965485742974005667,
+    0.93406843615772578800,
+    0.93832039777959288365,
+    0.94241156519108305981,
+    0.94634285837340290515,
+    0.95011529752129487656,
+    0.95373000642576113641,
+    0.95718821610986096274,
+    0.96049126870802028342,
+    0.96364062156981213252,
+    0.96663785155841656709,
+    0.96948465950245923177,
+    0.97218287474858179658,
+    0.97473445975240266776,
+    0.97714151463970571416,
+    0.97940628167086268381,
+    0.98153114955374010687,
+    0.98351865757863272876,
+    0.98537149959852037111,
+    0.98709252795403406719,
+    0.98868475754742947994,
+    0.99015137040077015918,
+    0.99149572117810613240,
+    0.99272134428278861533,
+    0.99383196321275502221,
+    0.99483150280062100052,
+    0.99572410469840718851,
+    0.99651414591489027385,
+    0.99720625937222195908,
+    0.99780535449595727456,
+    0.99831663531840739253,
+    0.99874561446809511470,
+    0.99909812496766759766,
+    0.99938033802502358193,
+    0.99959879967191068325,
+    0.99976049092443204733,
+    0.99987288812035761194,
+    0.99994399620705437576,
+    0.99998243035489159858,
+    0.99999759637974846462
+  };
+  static double x_511[511] =
+  {
+    -0.999999672956734384381,
+    -0.999997596379748464620,
+    -0.999992298136257588028,
+    -0.999982430354891598580,
+    -0.999966730098486276883,
+    -0.999943996207054375764,
+    -0.999913081144678282800,
+    -0.999872888120357611938,
+    -0.999822363679787739196,
+    -0.999760490924432047330,
+    -0.999686286448317731776,
+    -0.999598799671910683252,
+    -0.999497112467187190535,
+    -0.999380338025023581928,
+    -0.999247618943342473599,
+    -0.999098124967667597662,
+    -0.998931050830810562236,
+    -0.998745614468095114704,
+    -0.998541055697167906027,
+    -0.998316635318407392531,
+    -0.998071634524930323302,
+    -0.997805354495957274562,
+    -0.997517116063472399965,
+    -0.997206259372221959076,
+    -0.996872143485260161299,
+    -0.996514145914890273849,
+    -0.996131662079315037786,
+    -0.995724104698407188509,
+    -0.995290903148810302261,
+    -0.994831502800621000519,
+    -0.994345364356723405931,
+    -0.993831963212755022209,
+    -0.993290788851684966211,
+    -0.992721344282788615328,
+    -0.992123145530863117683,
+    -0.991495721178106132399,
+    -0.990838611958294243677,
+    -0.990151370400770159181,
+    -0.989433560520240838716,
+    -0.988684757547429479939,
+    -0.987904547695124280467,
+    -0.987092527954034067190,
+    -0.986248305913007552681,
+    -0.985371499598520371114,
+    -0.984461737328814534596,
+    -0.983518657578632728762,
+    -0.982541908851080604251,
+    -0.981531149553740106867,
+    -0.980486047876721339416,
+    -0.979406281670862683806,
+    -0.978291538324758539526,
+    -0.977141514639705714156,
+    -0.975955916702011753129,
+    -0.974734459752402667761,
+    -0.973476868052506926773,
+    -0.972182874748581796578,
+    -0.970852221732792443256,
+    -0.969484659502459231771,
+    -0.968079947017759947964,
+    -0.966637851558416567092,
+    -0.965158148579915665979,
+    -0.963640621569812132521,
+    -0.962085061904651475741,
+    -0.960491268708020283423,
+    -0.958859048710200221356,
+    -0.957188216109860962736,
+    -0.955478592438183697574,
+    -0.953730006425761136415,
+    -0.951942293872573589498,
+    -0.950115297521294876558,
+    -0.948248866934137357063,
+    -0.946342858373402905148,
+    -0.944397134685866648591,
+    -0.942411565191083059813,
+    -0.940386025573669721370,
+    -0.938320397779592883655,
+    -0.936214569916450806625,
+    -0.934068436157725787999,
+    -0.931881896650953639345,
+    -0.929654857429740056670,
+    -0.927387230329536696843,
+    -0.925078932907075652364,
+    -0.922729888363349241523,
+    -0.920340025470012420730,
+    -0.917909278499077501636,
+    -0.915437587155765040644,
+    -0.912924896514370590080,
+    -0.910371156957004292498,
+    -0.907776324115058903624,
+    -0.905140358813261595189,
+    -0.902463227016165675048,
+    -0.899744899776940036639,
+    -0.896985353188316590376,
+    -0.894184568335559022859,
+    -0.891342531251319871666,
+    -0.888459232872256998890,
+    -0.885534668997285008926,
+    -0.882568840247341906842,
+    -0.879561752026556262568,
+    -0.876513414484705269742,
+    -0.873423842480859310192,
+    -0.870293055548113905851,
+    -0.867121077859315215614,
+    -0.863907938193690477146,
+    -0.860653669904299969802,
+    -0.857358310886232156525,
+    -0.854021903545468625813,
+    -0.850644494768350279758,
+    -0.847226135891580884381,
+    -0.843766882672708601038,
+    -0.840266795261030442350,
+    -0.836725938168868735503,
+    -0.833144380243172624728,
+    -0.829522194637401400178,
+    -0.825859458783650001088,
+    -0.822156254364980407373,
+    -0.818412667287925807395,
+    -0.814628787655137413436,
+    -0.810804709738146594361,
+    -0.806940531950217611856,
+    -0.803036356819268687782,
+    -0.799092290960841401800,
+    -0.795108445051100526780,
+    -0.791084933799848361435,
+    -0.787021875923539422170,
+    -0.782919394118283016385,
+    -0.778777615032822744702,
+    -0.774596669241483377036,
+    -0.770376691217076824278,
+    -0.766117819303760090717,
+    -0.761820195689839149173,
+    -0.757483966380513637926,
+    -0.753109281170558142523,
+    -0.748696293616936602823,
+    -0.744245161011347082309,
+    -0.739756044352694758677,
+    -0.735229108319491547663,
+    -0.730664521242181261329,
+    -0.726062455075389632685,
+    -0.721423085370098915485,
+    -0.716746591245747095767,
+    -0.712033155362252034587,
+    -0.707282963891961103412,
+    -0.702496206491527078610,
+    -0.697673076273711232906,
+    -0.692813769779114702895,
+    -0.687918486947839325756,
+    -0.682987431091079228087,
+    -0.678020808862644517838,
+    -0.673018830230418479199,
+    -0.667981708447749702165,
+    -0.662909660024780595461,
+    -0.657802904699713735422,
+    -0.652661665410017496101,
+    -0.647486168263572388782,
+    -0.642276642509759513774,
+    -0.637033320510492495071,
+    -0.631756437711194230414,
+    -0.626446232611719746542,
+    -0.621102946737226402941,
+    -0.615726824608992638014,
+    -0.610318113715186400156,
+    -0.604877064481584353319,
+    -0.599403930242242892974,
+    -0.593898967210121954393,
+    -0.588362434447662541434,
+    -0.582794593837318850840,
+    -0.577195710052045814844,
+    -0.571566050525742833992,
+    -0.565905885423654422623,
+    -0.560215487612728441818,
+    -0.554495132631932548866,
+    -0.548745098662529448608,
+    -0.542965666498311490492,
+    -0.537157119515795115982,
+    -0.531319743644375623972,
+    -0.525453827336442687395,
+    -0.519559661537457021993,
+    -0.513637539655988578507,
+    -0.507687757533716602155,
+    -0.501710613415391878251,
+    -0.495706407918761460170,
+    -0.489675444004456155436,
+    -0.483618026945841027562,
+    -0.477534464298829155284,
+    -0.471425065871658876934,
+    -0.465290143694634735858,
+    -0.459130011989832332874,
+    -0.452944987140767283784,
+    -0.446735387662028473742,
+    -0.440501534168875795783,
+    -0.434243749346802558002,
+    -0.427962357921062742583,
+    -0.421657686626163300056,
+    -0.415330064175321663764,
+    -0.408979821229888672409,
+    -0.402607290368737092671,
+    -0.396212806057615939183,
+    -0.389796704618470795479,
+    -0.383359324198730346916,
+    -0.376901004740559344802,
+    -0.370422087950078230138,
+    -0.363922917266549655269,
+    -0.357403837831532152376,
+    -0.350865196458001209011,
+    -0.344307341599438022777,
+    -0.337730623318886219621,
+    -0.331135393257976833093,
+    -0.324522004605921855207,
+    -0.317890812068476683182,
+    -0.311242171836871800300,
+    -0.304576441556714043335,
+    -0.297893980296857823437,
+    -0.291195148518246681964,
+    -0.284480308042725577496,
+    -0.277749822021824315065,
+    -0.271004054905512543536,
+    -0.264243372410926761945,
+    -0.257468141491069790481,
+    -0.250678730303483176613,
+    -0.243875508178893021593,
+    -0.237058845589829727213,
+    -0.230229114119222177156,
+    -0.223386686428966881628,
+    -0.216531936228472628081,
+    -0.209665238243181194766,
+    -0.202786968183064697557,
+    -0.195897502711100153915,
+    -0.188997219411721861059,
+    -0.182086496759252198246,
+    -0.175165714086311475707,
+    -0.168235251552207464982,
+    -0.161295490111305257361,
+    -0.154346811481378108692,
+    -0.147389598111939940054,
+    -0.140424233152560174594,
+    -0.133451100421161601344,
+    -0.126470584372301966851,
+    -0.119483070065440005133,
+    -0.112488943133186625746,
+    -0.105488589749541988533,
+    -0.984823965981192020903E-01,
+    -0.914707508403553909095E-01,
+    -0.844540400837108837102E-01,
+    -0.774326523498572825675E-01,
+    -0.704069760428551790633E-01,
+    -0.633773999173222898797E-01,
+    -0.563443130465927899720E-01,
+    -0.493081047908686267156E-01,
+    -0.422691647653636032124E-01,
+    -0.352278828084410232603E-01,
+    -0.281846489497456943394E-01,
+    -0.211398533783310883350E-01,
+    -0.140938864107824626142E-01,
+    -0.704713845933674648514E-02,
+    +0.000000000000000000000,
+    +0.704713845933674648514E-02,
+    +0.140938864107824626142E-01,
+    +0.211398533783310883350E-01,
+    +0.281846489497456943394E-01,
+    +0.352278828084410232603E-01,
+    +0.422691647653636032124E-01,
+    +0.493081047908686267156E-01,
+    +0.563443130465927899720E-01,
+    +0.633773999173222898797E-01,
+    +0.704069760428551790633E-01,
+    +0.774326523498572825675E-01,
+    +0.844540400837108837102E-01,
+    +0.914707508403553909095E-01,
+    +0.984823965981192020903E-01,
+    +0.105488589749541988533,
+    +0.112488943133186625746,
+    +0.119483070065440005133,
+    +0.126470584372301966851,
+    +0.133451100421161601344,
+    +0.140424233152560174594,
+    +0.147389598111939940054,
+    +0.154346811481378108692,
+    +0.161295490111305257361,
+    +0.168235251552207464982,
+    +0.175165714086311475707,
+    +0.182086496759252198246,
+    +0.188997219411721861059,
+    +0.195897502711100153915,
+    +0.202786968183064697557,
+    +0.209665238243181194766,
+    +0.216531936228472628081,
+    +0.223386686428966881628,
+    +0.230229114119222177156,
+    +0.237058845589829727213,
+    +0.243875508178893021593,
+    +0.250678730303483176613,
+    +0.257468141491069790481,
+    +0.264243372410926761945,
+    +0.271004054905512543536,
+    +0.277749822021824315065,
+    +0.284480308042725577496,
+    +0.291195148518246681964,
+    +0.297893980296857823437,
+    +0.304576441556714043335,
+    +0.311242171836871800300,
+    +0.317890812068476683182,
+    +0.324522004605921855207,
+    +0.331135393257976833093,
+    +0.337730623318886219621,
+    +0.344307341599438022777,
+    +0.350865196458001209011,
+    +0.357403837831532152376,
+    +0.363922917266549655269,
+    +0.370422087950078230138,
+    +0.376901004740559344802,
+    +0.383359324198730346916,
+    +0.389796704618470795479,
+    +0.396212806057615939183,
+    +0.402607290368737092671,
+    +0.408979821229888672409,
+    +0.415330064175321663764,
+    +0.421657686626163300056,
+    +0.427962357921062742583,
+    +0.434243749346802558002,
+    +0.440501534168875795783,
+    +0.446735387662028473742,
+    +0.452944987140767283784,
+    +0.459130011989832332874,
+    +0.465290143694634735858,
+    +0.471425065871658876934,
+    +0.477534464298829155284,
+    +0.483618026945841027562,
+    +0.489675444004456155436,
+    +0.495706407918761460170,
+    +0.501710613415391878251,
+    +0.507687757533716602155,
+    +0.513637539655988578507,
+    +0.519559661537457021993,
+    +0.525453827336442687395,
+    +0.531319743644375623972,
+    +0.537157119515795115982,
+    +0.542965666498311490492,
+    +0.548745098662529448608,
+    +0.554495132631932548866,
+    +0.560215487612728441818,
+    +0.565905885423654422623,
+    +0.571566050525742833992,
+    +0.577195710052045814844,
+    +0.582794593837318850840,
+    +0.588362434447662541434,
+    +0.593898967210121954393,
+    +0.599403930242242892974,
+    +0.604877064481584353319,
+    +0.610318113715186400156,
+    +0.615726824608992638014,
+    +0.621102946737226402941,
+    +0.626446232611719746542,
+    +0.631756437711194230414,
+    +0.637033320510492495071,
+    +0.642276642509759513774,
+    +0.647486168263572388782,
+    +0.652661665410017496101,
+    +0.657802904699713735422,
+    +0.662909660024780595461,
+    +0.667981708447749702165,
+    +0.673018830230418479199,
+    +0.678020808862644517838,
+    +0.682987431091079228087,
+    +0.687918486947839325756,
+    +0.692813769779114702895,
+    +0.697673076273711232906,
+    +0.702496206491527078610,
+    +0.707282963891961103412,
+    +0.712033155362252034587,
+    +0.716746591245747095767,
+    +0.721423085370098915485,
+    +0.726062455075389632685,
+    +0.730664521242181261329,
+    +0.735229108319491547663,
+    +0.739756044352694758677,
+    +0.744245161011347082309,
+    +0.748696293616936602823,
+    +0.753109281170558142523,
+    +0.757483966380513637926,
+    +0.761820195689839149173,
+    +0.766117819303760090717,
+    +0.770376691217076824278,
+    +0.774596669241483377036,
+    +0.778777615032822744702,
+    +0.782919394118283016385,
+    +0.787021875923539422170,
+    +0.791084933799848361435,
+    +0.795108445051100526780,
+    +0.799092290960841401800,
+    +0.803036356819268687782,
+    +0.806940531950217611856,
+    +0.810804709738146594361,
+    +0.814628787655137413436,
+    +0.818412667287925807395,
+    +0.822156254364980407373,
+    +0.825859458783650001088,
+    +0.829522194637401400178,
+    +0.833144380243172624728,
+    +0.836725938168868735503,
+    +0.840266795261030442350,
+    +0.843766882672708601038,
+    +0.847226135891580884381,
+    +0.850644494768350279758,
+    +0.854021903545468625813,
+    +0.857358310886232156525,
+    +0.860653669904299969802,
+    +0.863907938193690477146,
+    +0.867121077859315215614,
+    +0.870293055548113905851,
+    +0.873423842480859310192,
+    +0.876513414484705269742,
+    +0.879561752026556262568,
+    +0.882568840247341906842,
+    +0.885534668997285008926,
+    +0.888459232872256998890,
+    +0.891342531251319871666,
+    +0.894184568335559022859,
+    +0.896985353188316590376,
+    +0.899744899776940036639,
+    +0.902463227016165675048,
+    +0.905140358813261595189,
+    +0.907776324115058903624,
+    +0.910371156957004292498,
+    +0.912924896514370590080,
+    +0.915437587155765040644,
+    +0.917909278499077501636,
+    +0.920340025470012420730,
+    +0.922729888363349241523,
+    +0.925078932907075652364,
+    +0.927387230329536696843,
+    +0.929654857429740056670,
+    +0.931881896650953639345,
+    +0.934068436157725787999,
+    +0.936214569916450806625,
+    +0.938320397779592883655,
+    +0.940386025573669721370,
+    +0.942411565191083059813,
+    +0.944397134685866648591,
+    +0.946342858373402905148,
+    +0.948248866934137357063,
+    +0.950115297521294876558,
+    +0.951942293872573589498,
+    +0.953730006425761136415,
+    +0.955478592438183697574,
+    +0.957188216109860962736,
+    +0.958859048710200221356,
+    +0.960491268708020283423,
+    +0.962085061904651475741,
+    +0.963640621569812132521,
+    +0.965158148579915665979,
+    +0.966637851558416567092,
+    +0.968079947017759947964,
+    +0.969484659502459231771,
+    +0.970852221732792443256,
+    +0.972182874748581796578,
+    +0.973476868052506926773,
+    +0.974734459752402667761,
+    +0.975955916702011753129,
+    +0.977141514639705714156,
+    +0.978291538324758539526,
+    +0.979406281670862683806,
+    +0.980486047876721339416,
+    +0.981531149553740106867,
+    +0.982541908851080604251,
+    +0.983518657578632728762,
+    +0.984461737328814534596,
+    +0.985371499598520371114,
+    +0.986248305913007552681,
+    +0.987092527954034067190,
+    +0.987904547695124280467,
+    +0.988684757547429479939,
+    +0.989433560520240838716,
+    +0.990151370400770159181,
+    +0.990838611958294243677,
+    +0.991495721178106132399,
+    +0.992123145530863117683,
+    +0.992721344282788615328,
+    +0.993290788851684966211,
+    +0.993831963212755022209,
+    +0.994345364356723405931,
+    +0.994831502800621000519,
+    +0.995290903148810302261,
+    +0.995724104698407188509,
+    +0.996131662079315037786,
+    +0.996514145914890273849,
+    +0.996872143485260161299,
+    +0.997206259372221959076,
+    +0.997517116063472399965,
+    +0.997805354495957274562,
+    +0.998071634524930323302,
+    +0.998316635318407392531,
+    +0.998541055697167906027,
+    +0.998745614468095114704,
+    +0.998931050830810562236,
+    +0.999098124967667597662,
+    +0.999247618943342473599,
+    +0.999380338025023581928,
+    +0.999497112467187190535,
+    +0.999598799671910683252,
+    +0.999686286448317731776,
+    +0.999760490924432047330,
+    +0.999822363679787739196,
+    +0.999872888120357611938,
+    +0.999913081144678282800,
+    +0.999943996207054375764,
+    +0.999966730098486276883,
+    +0.999982430354891598580,
+    +0.999992298136257588028,
+    +0.999997596379748464620,
+    +0.999999672956734384381
+  };
+
+  if ( n == 1 )
+  {
+    webbur::r8vec_copy ( n, x_001, x );
+  }
+  else if ( n == 3 )
+  {
+    webbur::r8vec_copy ( n, x_003, x );
+  }
+  else if ( n == 7 )
+  {
+    webbur::r8vec_copy ( n, x_007, x );
+  }
+  else if ( n == 15 )
+  {
+    webbur::r8vec_copy ( n, x_015, x );
+  }
+  else if ( n == 31 )
+  {
+    webbur::r8vec_copy ( n, x_031, x );
+  }
+  else if ( n == 63 )
+  {
+    webbur::r8vec_copy ( n, x_063, x );
+  }
+  else if ( n == 127 )
+  {
+    webbur::r8vec_copy ( n, x_127, x );
+  }
+  else if ( n == 255 )
+  {
+    webbur::r8vec_copy ( n, x_255, x );
+  }
+  else if ( n == 511 )
+  {
+    webbur::r8vec_copy ( n, x_511, x );
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "PATTERSON_LOOKUP_POINTS - Fatal error!\n";
+    std::cerr << "  Unexpected value of N = " << n << "\n";
+    std::exit ( 1 );
+  }
+  return;
+}
+//****************************************************************************80
+
+void patterson_lookup_points_np ( int n, int np, double p[], double x[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    PATTERSON_LOOKUP_POINTS_NP looks up Patterson quadrature points.
+//
+//  Discussion:
+//
+//    This is the "parameterized" (_NP) wrapper for PATTERSON_LOOKUP_POINTS.
+//    The parameter array P is accepted only so that this routine matches the
+//    common (n, np, p[], x[]) interface; neither NP nor P is consulted.
+//
+//    The abscissas are numbered from left to right, and the rule is
+//    defined on [-1,1].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    17 December 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Prem Kythe, Michael Schaeferkotter,
+//    Handbook of Computational Methods for Integration,
+//    Chapman and Hall, 2004,
+//    ISBN: 1-58488-428-2,
+//    LC: QA299.3.K98.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    Legal values are 1, 3, 7, 15, 31, 63, 127, 255 and 511.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double X[N], the abscissas.
+//
+{
+  //  Delegate directly; NP and P are intentionally unused.
+  patterson_lookup_points ( n, x );
+}
+//****************************************************************************80
+
+void patterson_lookup_weights ( int n, double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    PATTERSON_LOOKUP_WEIGHTS looks up Patterson quadrature weights.
+//
+//  Discussion:
+//
+//    The allowed orders are 1, 3, 7, 15, 31, 63, 127, 255 and 511.
+//
+//    The weights are positive, symmetric and should sum to 2.
+//
+//    The user must preallocate space for the output array W.
+//
+//    These rules constitute a nested family.  The rules can integrate exactly
+//    any polynomial of degree 1, 5, 11, 23, 47, 95, 191, 383 or 767,
+//    respectively.
+//
+//    The data for N = 511 was supplied by Dirk Laurie, and is derived
+//    from a NAG Library function d01arf.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    14 September 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Prem Kythe, Michael Schaeferkotter,
+//    Handbook of Computational Methods for Integration,
+//    Chapman and Hall, 2004,
+//    ISBN: 1-58488-428-2,
+//    LC: QA299.3.K98.
+//
+//    NAG Library Documentation,
+//    D01ARF,
+//    The Numerical Algorithms Group.
+//
+//    Thomas Patterson,
+//    The Optimal Addition of Points to Quadrature Formulae,
+//    Mathematics of Computation,
+//    Volume 22, Number 104, October 1968, pages 847-856.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    Legal values are 1, 3, 7, 15, 31, 63, 127, 255 or 511.
+//
+//    Output, double W[N], the weights.
+//
+{
+  static double w_001[1] =
+  {
+    2.0
+  };
+  static double w_003[3] =
+  {
+    0.555555555555555555556,
+    0.888888888888888888889,
+    0.555555555555555555556
+  };
+  static double w_007[7] =
+  {
+    0.104656226026467265194,
+    0.268488089868333440729,
+    0.401397414775962222905,
+    0.450916538658474142345,
+    0.401397414775962222905,
+    0.268488089868333440729,
+    0.104656226026467265194
+  };
+  static double w_015[15] =
+  {
+    0.0170017196299402603390,
+    0.0516032829970797396969,
+    0.0929271953151245376859,
+    0.134415255243784220360,
+    0.171511909136391380787,
+    0.200628529376989021034,
+    0.219156858401587496404,
+    0.225510499798206687386,
+    0.219156858401587496404,
+    0.200628529376989021034,
+    0.171511909136391380787,
+    0.134415255243784220360,
+    0.0929271953151245376859,
+    0.0516032829970797396969,
+    0.0170017196299402603390
+  };
+  static double w_031[31] =
+  {
+    0.00254478079156187441540,
+    0.00843456573932110624631,
+    0.0164460498543878109338,
+    0.0258075980961766535646,
+    0.0359571033071293220968,
+    0.0464628932617579865414,
+    0.0569795094941233574122,
+    0.0672077542959907035404,
+    0.0768796204990035310427,
+    0.0857559200499903511542,
+    0.0936271099812644736167,
+    0.100314278611795578771,
+    0.105669893580234809744,
+    0.109578421055924638237,
+    0.111956873020953456880,
+    0.112755256720768691607,
+    0.111956873020953456880,
+    0.109578421055924638237,
+    0.105669893580234809744,
+    0.100314278611795578771,
+    0.0936271099812644736167,
+    0.0857559200499903511542,
+    0.0768796204990035310427,
+    0.0672077542959907035404,
+    0.0569795094941233574122,
+    0.0464628932617579865414,
+    0.0359571033071293220968,
+    0.0258075980961766535646,
+    0.0164460498543878109338,
+    0.00843456573932110624631,
+    0.00254478079156187441540
+  };
+  static double w_063[63] =
+  {
+    0.000363221481845530659694,
+    0.00126515655623006801137,
+    0.00257904979468568827243,
+    0.00421763044155885483908,
+    0.00611550682211724633968,
+    0.00822300795723592966926,
+    0.0104982469096213218983,
+    0.0129038001003512656260,
+    0.0154067504665594978021,
+    0.0179785515681282703329,
+    0.0205942339159127111492,
+    0.0232314466399102694433,
+    0.0258696793272147469108,
+    0.0284897547458335486125,
+    0.0310735511116879648799,
+    0.0336038771482077305417,
+    0.0360644327807825726401,
+    0.0384398102494555320386,
+    0.0407155101169443189339,
+    0.0428779600250077344929,
+    0.0449145316536321974143,
+    0.0468135549906280124026,
+    0.0485643304066731987159,
+    0.0501571393058995374137,
+    0.0515832539520484587768,
+    0.0528349467901165198621,
+    0.0539054993352660639269,
+    0.0547892105279628650322,
+    0.0554814043565593639878,
+    0.0559784365104763194076,
+    0.0562776998312543012726,
+    0.0563776283603847173877,
+    0.0562776998312543012726,
+    0.0559784365104763194076,
+    0.0554814043565593639878,
+    0.0547892105279628650322,
+    0.0539054993352660639269,
+    0.0528349467901165198621,
+    0.0515832539520484587768,
+    0.0501571393058995374137,
+    0.0485643304066731987159,
+    0.0468135549906280124026,
+    0.0449145316536321974143,
+    0.0428779600250077344929,
+    0.0407155101169443189339,
+    0.0384398102494555320386,
+    0.0360644327807825726401,
+    0.0336038771482077305417,
+    0.0310735511116879648799,
+    0.0284897547458335486125,
+    0.0258696793272147469108,
+    0.0232314466399102694433,
+    0.0205942339159127111492,
+    0.0179785515681282703329,
+    0.0154067504665594978021,
+    0.0129038001003512656260,
+    0.0104982469096213218983,
+    0.00822300795723592966926,
+    0.00611550682211724633968,
+    0.00421763044155885483908,
+    0.00257904979468568827243,
+    0.00126515655623006801137,
+    0.000363221481845530659694
+  };
+  static double w_127[127] =
+  {
+    0.0000505360952078625176247,
+    0.000180739564445388357820,
+    0.000377746646326984660274,
+    0.000632607319362633544219,
+    0.000938369848542381500794,
+    0.00128952408261041739210,
+    0.00168114286542146990631,
+    0.00210881524572663287933,
+    0.00256876494379402037313,
+    0.00305775341017553113613,
+    0.00357289278351729964938,
+    0.00411150397865469304717,
+    0.00467105037211432174741,
+    0.00524912345480885912513,
+    0.00584344987583563950756,
+    0.00645190005017573692280,
+    0.00707248999543355546805,
+    0.00770337523327974184817,
+    0.00834283875396815770558,
+    0.00898927578406413572328,
+    0.00964117772970253669530,
+    0.0102971169579563555237,
+    0.0109557333878379016480,
+    0.0116157233199551347270,
+    0.0122758305600827700870,
+    0.0129348396636073734547,
+    0.0135915710097655467896,
+    0.0142448773729167743063,
+    0.0148936416648151820348,
+    0.0155367755558439824399,
+    0.0161732187295777199419,
+    0.0168019385741038652709,
+    0.0174219301594641737472,
+    0.0180322163903912863201,
+    0.0186318482561387901863,
+    0.0192199051247277660193,
+    0.0197954950480974994880,
+    0.0203577550584721594669,
+    0.0209058514458120238522,
+    0.0214389800125038672465,
+    0.0219563663053178249393,
+    0.0224572658268160987071,
+    0.0229409642293877487608,
+    0.0234067774953140062013,
+    0.0238540521060385400804,
+    0.0242821652033365993580,
+    0.0246905247444876769091,
+    0.0250785696529497687068,
+    0.0254457699654647658126,
+    0.0257916269760242293884,
+    0.0261156733767060976805,
+    0.0264174733950582599310,
+    0.0266966229274503599062,
+    0.0269527496676330319634,
+    0.0271855132296247918192,
+    0.0273946052639814325161,
+    0.0275797495664818730349,
+    0.0277407021782796819939,
+    0.0278772514766137016085,
+    0.0279892182552381597038,
+    0.0280764557938172466068,
+    0.0281388499156271506363,
+    0.0281763190330166021307,
+    0.0281888141801923586938,
+    0.0281763190330166021307,
+    0.0281388499156271506363,
+    0.0280764557938172466068,
+    0.0279892182552381597038,
+    0.0278772514766137016085,
+    0.0277407021782796819939,
+    0.0275797495664818730349,
+    0.0273946052639814325161,
+    0.0271855132296247918192,
+    0.0269527496676330319634,
+    0.0266966229274503599062,
+    0.0264174733950582599310,
+    0.0261156733767060976805,
+    0.0257916269760242293884,
+    0.0254457699654647658126,
+    0.0250785696529497687068,
+    0.0246905247444876769091,
+    0.0242821652033365993580,
+    0.0238540521060385400804,
+    0.0234067774953140062013,
+    0.0229409642293877487608,
+    0.0224572658268160987071,
+    0.0219563663053178249393,
+    0.0214389800125038672465,
+    0.0209058514458120238522,
+    0.0203577550584721594669,
+    0.0197954950480974994880,
+    0.0192199051247277660193,
+    0.0186318482561387901863,
+    0.0180322163903912863201,
+    0.0174219301594641737472,
+    0.0168019385741038652709,
+    0.0161732187295777199419,
+    0.0155367755558439824399,
+    0.0148936416648151820348,
+    0.0142448773729167743063,
+    0.0135915710097655467896,
+    0.0129348396636073734547,
+    0.0122758305600827700870,
+    0.0116157233199551347270,
+    0.0109557333878379016480,
+    0.0102971169579563555237,
+    0.00964117772970253669530,
+    0.00898927578406413572328,
+    0.00834283875396815770558,
+    0.00770337523327974184817,
+    0.00707248999543355546805,
+    0.00645190005017573692280,
+    0.00584344987583563950756,
+    0.00524912345480885912513,
+    0.00467105037211432174741,
+    0.00411150397865469304717,
+    0.00357289278351729964938,
+    0.00305775341017553113613,
+    0.00256876494379402037313,
+    0.00210881524572663287933,
+    0.00168114286542146990631,
+    0.00128952408261041739210,
+    0.000938369848542381500794,
+    0.000632607319362633544219,
+    0.000377746646326984660274,
+    0.000180739564445388357820,
+    0.0000505360952078625176247
+  };
+  static double w_255[255] =
+  {
+    0.69379364324108267170E-05,
+    0.25157870384280661489E-04,
+    0.53275293669780613125E-04,
+    0.90372734658751149261E-04,
+    0.13575491094922871973E-03,
+    0.18887326450650491366E-03,
+    0.24921240048299729402E-03,
+    0.31630366082226447689E-03,
+    0.38974528447328229322E-03,
+    0.46918492424785040975E-03,
+    0.55429531493037471492E-03,
+    0.64476204130572477933E-03,
+    0.74028280424450333046E-03,
+    0.84057143271072246365E-03,
+    0.94536151685852538246E-03,
+    0.10544076228633167722E-02,
+    0.11674841174299594077E-02,
+    0.12843824718970101768E-02,
+    0.14049079956551446427E-02,
+    0.15288767050877655684E-02,
+    0.16561127281544526052E-02,
+    0.17864463917586498247E-02,
+    0.19197129710138724125E-02,
+    0.20557519893273465236E-02,
+    0.21944069253638388388E-02,
+    0.23355251860571608737E-02,
+    0.24789582266575679307E-02,
+    0.26245617274044295626E-02,
+    0.27721957645934509940E-02,
+    0.29217249379178197538E-02,
+    0.30730184347025783234E-02,
+    0.32259500250878684614E-02,
+    0.33803979910869203823E-02,
+    0.35362449977167777340E-02,
+    0.36933779170256508183E-02,
+    0.38516876166398709241E-02,
+    0.40110687240750233989E-02,
+    0.41714193769840788528E-02,
+    0.43326409680929828545E-02,
+    0.44946378920320678616E-02,
+    0.46573172997568547773E-02,
+    0.48205888648512683476E-02,
+    0.49843645647655386012E-02,
+    0.51485584789781777618E-02,
+    0.53130866051870565663E-02,
+    0.54778666939189508240E-02,
+    0.56428181013844441585E-02,
+    0.58078616599775673635E-02,
+    0.59729195655081658049E-02,
+    0.61379152800413850435E-02,
+    0.63027734490857587172E-02,
+    0.64674198318036867274E-02,
+    0.66317812429018878941E-02,
+    0.67957855048827733948E-02,
+    0.69593614093904229394E-02,
+    0.71224386864583871532E-02,
+    0.72849479805538070639E-02,
+    0.74468208324075910174E-02,
+    0.76079896657190565832E-02,
+    0.77683877779219912200E-02,
+    0.79279493342948491103E-02,
+    0.80866093647888599710E-02,
+    0.82443037630328680306E-02,
+    0.84009692870519326354E-02,
+    0.85565435613076896192E-02,
+    0.87109650797320868736E-02,
+    0.88641732094824942641E-02,
+    0.90161081951956431600E-02,
+    0.91667111635607884067E-02,
+    0.93159241280693950932E-02,
+    0.94636899938300652943E-02,
+    0.96099525623638830097E-02,
+    0.97546565363174114611E-02,
+    0.98977475240487497440E-02,
+    0.10039172044056840798E-01,
+    0.10178877529236079733E-01,
+    0.10316812330947621682E-01,
+    0.10452925722906011926E-01,
+    0.10587167904885197931E-01,
+    0.10719490006251933623E-01,
+    0.10849844089337314099E-01,
+    0.10978183152658912470E-01,
+    0.11104461134006926537E-01,
+    0.11228632913408049354E-01,
+    0.11350654315980596602E-01,
+    0.11470482114693874380E-01,
+    0.11588074033043952568E-01,
+    0.11703388747657003101E-01,
+    0.11816385890830235763E-01,
+    0.11927026053019270040E-01,
+    0.12035270785279562630E-01,
+    0.12141082601668299679E-01,
+    0.12244424981611985899E-01,
+    0.12345262372243838455E-01,
+    0.12443560190714035263E-01,
+    0.12539284826474884353E-01,
+    0.12632403643542078765E-01,
+    0.12722884982732382906E-01,
+    0.12810698163877361967E-01,
+    0.12895813488012114694E-01,
+    0.12978202239537399286E-01,
+    0.13057836688353048840E-01,
+    0.13134690091960152836E-01,
+    0.13208736697529129966E-01,
+    0.13279951743930530650E-01,
+    0.13348311463725179953E-01,
+    0.13413793085110098513E-01,
+    0.13476374833816515982E-01,
+    0.13536035934956213614E-01,
+    0.13592756614812395910E-01,
+    0.13646518102571291428E-01,
+    0.13697302631990716258E-01,
+    0.13745093443001896632E-01,
+    0.13789874783240936517E-01,
+    0.13831631909506428676E-01,
+    0.13870351089139840997E-01,
+    0.13906019601325461264E-01,
+    0.13938625738306850804E-01,
+    0.13968158806516938516E-01,
+    0.13994609127619079852E-01,
+    0.14017968039456608810E-01,
+    0.14038227896908623303E-01,
+    0.14055382072649964277E-01,
+    0.14069424957813575318E-01,
+    0.14080351962553661325E-01,
+    0.14088159516508301065E-01,
+    0.14092845069160408355E-01,
+    0.14094407090096179347E-01,
+    0.14092845069160408355E-01,
+    0.14088159516508301065E-01,
+    0.14080351962553661325E-01,
+    0.14069424957813575318E-01,
+    0.14055382072649964277E-01,
+    0.14038227896908623303E-01,
+    0.14017968039456608810E-01,
+    0.13994609127619079852E-01,
+    0.13968158806516938516E-01,
+    0.13938625738306850804E-01,
+    0.13906019601325461264E-01,
+    0.13870351089139840997E-01,
+    0.13831631909506428676E-01,
+    0.13789874783240936517E-01,
+    0.13745093443001896632E-01,
+    0.13697302631990716258E-01,
+    0.13646518102571291428E-01,
+    0.13592756614812395910E-01,
+    0.13536035934956213614E-01,
+    0.13476374833816515982E-01,
+    0.13413793085110098513E-01,
+    0.13348311463725179953E-01,
+    0.13279951743930530650E-01,
+    0.13208736697529129966E-01,
+    0.13134690091960152836E-01,
+    0.13057836688353048840E-01,
+    0.12978202239537399286E-01,
+    0.12895813488012114694E-01,
+    0.12810698163877361967E-01,
+    0.12722884982732382906E-01,
+    0.12632403643542078765E-01,
+    0.12539284826474884353E-01,
+    0.12443560190714035263E-01,
+    0.12345262372243838455E-01,
+    0.12244424981611985899E-01,
+    0.12141082601668299679E-01,
+    0.12035270785279562630E-01,
+    0.11927026053019270040E-01,
+    0.11816385890830235763E-01,
+    0.11703388747657003101E-01,
+    0.11588074033043952568E-01,
+    0.11470482114693874380E-01,
+    0.11350654315980596602E-01,
+    0.11228632913408049354E-01,
+    0.11104461134006926537E-01,
+    0.10978183152658912470E-01,
+    0.10849844089337314099E-01,
+    0.10719490006251933623E-01,
+    0.10587167904885197931E-01,
+    0.10452925722906011926E-01,
+    0.10316812330947621682E-01,
+    0.10178877529236079733E-01,
+    0.10039172044056840798E-01,
+    0.98977475240487497440E-02,
+    0.97546565363174114611E-02,
+    0.96099525623638830097E-02,
+    0.94636899938300652943E-02,
+    0.93159241280693950932E-02,
+    0.91667111635607884067E-02,
+    0.90161081951956431600E-02,
+    0.88641732094824942641E-02,
+    0.87109650797320868736E-02,
+    0.85565435613076896192E-02,
+    0.84009692870519326354E-02,
+    0.82443037630328680306E-02,
+    0.80866093647888599710E-02,
+    0.79279493342948491103E-02,
+    0.77683877779219912200E-02,
+    0.76079896657190565832E-02,
+    0.74468208324075910174E-02,
+    0.72849479805538070639E-02,
+    0.71224386864583871532E-02,
+    0.69593614093904229394E-02,
+    0.67957855048827733948E-02,
+    0.66317812429018878941E-02,
+    0.64674198318036867274E-02,
+    0.63027734490857587172E-02,
+    0.61379152800413850435E-02,
+    0.59729195655081658049E-02,
+    0.58078616599775673635E-02,
+    0.56428181013844441585E-02,
+    0.54778666939189508240E-02,
+    0.53130866051870565663E-02,
+    0.51485584789781777618E-02,
+    0.49843645647655386012E-02,
+    0.48205888648512683476E-02,
+    0.46573172997568547773E-02,
+    0.44946378920320678616E-02,
+    0.43326409680929828545E-02,
+    0.41714193769840788528E-02,
+    0.40110687240750233989E-02,
+    0.38516876166398709241E-02,
+    0.36933779170256508183E-02,
+    0.35362449977167777340E-02,
+    0.33803979910869203823E-02,
+    0.32259500250878684614E-02,
+    0.30730184347025783234E-02,
+    0.29217249379178197538E-02,
+    0.27721957645934509940E-02,
+    0.26245617274044295626E-02,
+    0.24789582266575679307E-02,
+    0.23355251860571608737E-02,
+    0.21944069253638388388E-02,
+    0.20557519893273465236E-02,
+    0.19197129710138724125E-02,
+    0.17864463917586498247E-02,
+    0.16561127281544526052E-02,
+    0.15288767050877655684E-02,
+    0.14049079956551446427E-02,
+    0.12843824718970101768E-02,
+    0.11674841174299594077E-02,
+    0.10544076228633167722E-02,
+    0.94536151685852538246E-03,
+    0.84057143271072246365E-03,
+    0.74028280424450333046E-03,
+    0.64476204130572477933E-03,
+    0.55429531493037471492E-03,
+    0.46918492424785040975E-03,
+    0.38974528447328229322E-03,
+    0.31630366082226447689E-03,
+    0.24921240048299729402E-03,
+    0.18887326450650491366E-03,
+    0.13575491094922871973E-03,
+    0.90372734658751149261E-04,
+    0.53275293669780613125E-04,
+    0.25157870384280661489E-04,
+    0.69379364324108267170E-05
+  };
+  static double w_511[511] =
+  {
+    0.945715933950007048827E-06,
+    0.345456507169149134898E-05,
+    0.736624069102321668857E-05,
+    0.125792781889592743525E-04,
+    0.190213681905875816679E-04,
+    0.266376412339000901358E-04,
+    0.353751372055189588628E-04,
+    0.451863674126296143105E-04,
+    0.560319507856164252140E-04,
+    0.678774554733972416227E-04,
+    0.806899228014035293851E-04,
+    0.944366322532705527066E-04,
+    0.109085545645741522051E-03,
+    0.124606200241498368482E-03,
+    0.140970302204104791413E-03,
+    0.158151830411132242924E-03,
+    0.176126765545083195474E-03,
+    0.194872642236641146532E-03,
+    0.214368090034216937149E-03,
+    0.234592462123925204879E-03,
+    0.255525589595236862014E-03,
+    0.277147657465187357459E-03,
+    0.299439176850911730874E-03,
+    0.322381020652862389664E-03,
+    0.345954492129903871350E-03,
+    0.370141402122251665232E-03,
+    0.394924138246873704434E-03,
+    0.420285716355361231823E-03,
+    0.446209810101403247488E-03,
+    0.472680758429262691232E-03,
+    0.499683553312800484519E-03,
+    0.527203811431658386125E-03,
+    0.555227733977307579715E-03,
+    0.583742058714979703847E-03,
+    0.612734008012225209294E-03,
+    0.642191235948505088403E-03,
+    0.672101776960108194646E-03,
+    0.702453997827572321358E-03,
+    0.733236554224767912055E-03,
+    0.764438352543882784191E-03,
+    0.796048517297550871506E-03,
+    0.828056364077226302608E-03,
+    0.860451377808527848128E-03,
+    0.893223195879324912340E-03,
+    0.926361595613111283368E-03,
+    0.959856485506936206261E-03,
+    0.993697899638760857945E-03,
+    0.102787599466367326179E-02,
+    0.106238104885340071375E-02,
+    0.109720346268191941940E-02,
+    0.113233376051597664917E-02,
+    0.116776259302858043685E-02,
+    0.120348074001265964881E-02,
+    0.123947911332878396534E-02,
+    0.127574875977346947345E-02,
+    0.131228086370221478128E-02,
+    0.134906674928353113127E-02,
+    0.138609788229672549700E-02,
+    0.142336587141720519900E-02,
+    0.146086246895890987689E-02,
+    0.149857957106456636214E-02,
+    0.153650921735128916170E-02,
+    0.157464359003212166189E-02,
+    0.161297501254393423070E-02,
+    0.165149594771914570655E-02,
+    0.169019899554346019117E-02,
+    0.172907689054461607168E-02,
+    0.176812249885838886701E-02,
+    0.180732881501808930079E-02,
+    0.184668895851282540913E-02,
+    0.188619617015808475394E-02,
+    0.192584380831993546204E-02,
+    0.196562534503150547732E-02,
+    0.200553436203751169944E-02,
+    0.204556454679958293446E-02,
+    0.208570968849203942640E-02,
+    0.212596367401472533045E-02,
+    0.216632048404649142727E-02,
+    0.220677418916003329194E-02,
+    0.224731894601603393082E-02,
+    0.228794899365195972378E-02,
+    0.232865864987842738864E-02,
+    0.236944230779380495146E-02,
+    0.241029443242563417382E-02,
+    0.245120955750556483923E-02,
+    0.249218228238276930060E-02,
+    0.253320726907925325750E-02,
+    0.257427923948908888092E-02,
+    0.261539297272236109225E-02,
+    0.265654330259352828314E-02,
+    0.269772511525294586667E-02,
+    0.273893334695947541201E-02,
+    0.278016298199139435045E-02,
+    0.282140905069222207923E-02,
+    0.286266662764757868253E-02,
+    0.290393082998878368175E-02,
+    0.294519681581857582284E-02,
+    0.298645978275408290247E-02,
+    0.302771496658198544480E-02,
+    0.306895764002069252174E-02,
+    0.311018311158427546158E-02,
+    0.315138672454287935858E-02,
+    0.319256385597434736790E-02,
+    0.323370991590184336368E-02,
+    0.327482034651233969564E-02,
+    0.331589062145094394706E-02,
+    0.335691624518616761342E-02,
+    0.339789275244138669739E-02,
+    0.343881570768790591876E-02,
+    0.347968070469521146972E-02,
+    0.352048336613417922682E-02,
+    0.356121934322919357659E-02,
+    0.360188431545532431869E-02,
+    0.364247399027690353194E-02,
+    0.368298410292403911967E-02,
+    0.372341041620379550870E-02,
+    0.376374872034296338241E-02,
+    0.380399483285952829161E-02,
+    0.384414459846013158917E-02,
+    0.388419388896099560998E-02,
+    0.392413860322995774660E-02,
+    0.396397466714742455513E-02,
+    0.400369803358421688562E-02,
+    0.404330468239442998549E-02,
+    0.408279062042157838350E-02,
+    0.412215188151643401528E-02,
+    0.416138452656509745764E-02,
+    0.420048464352596631772E-02,
+    0.423944834747438184434E-02,
+    0.427827178065384480959E-02,
+    0.431695111253279479928E-02,
+    0.435548253986604343679E-02,
+    0.439386228676004195260E-02,
+    0.443208660474124713206E-02,
+    0.447015177282692726900E-02,
+    0.450805409759782158001E-02,
+    0.454578991327213285488E-02,
+    0.458335558178039420335E-02,
+    0.462074749284080687482E-02,
+    0.465796206403469754658E-02,
+    0.469499574088179046532E-02,
+    0.473184499691503264714E-02,
+    0.476850633375474925263E-02,
+    0.480497628118194150483E-02,
+    0.484125139721057135214E-02,
+    0.487732826815870573054E-02,
+    0.491320350871841897367E-02,
+    0.494887376202437487201E-02,
+    0.498433569972103029914E-02,
+    0.501958602202842039909E-02,
+    0.505462145780650125058E-02,
+    0.508943876461803986674E-02,
+    0.512403472879005351831E-02,
+    0.515840616547381084096E-02,
+    0.519254991870341614863E-02,
+    0.522646286145300596306E-02,
+    0.526014189569259311205E-02,
+    0.529358395244259896547E-02,
+    0.532678599182711857974E-02,
+    0.535974500312596681161E-02,
+    0.539245800482555593606E-02,
+    0.542492204466865704951E-02,
+    0.545713419970309863995E-02,
+    0.548909157632945623482E-02,
+    0.552079131034778706457E-02,
+    0.555223056700346326850E-02,
+    0.558340654103215637610E-02,
+    0.561431645670402467678E-02,
+    0.564495756786715368885E-02,
+    0.567532715799029830087E-02,
+    0.570542254020497332312E-02,
+    0.573524105734693719020E-02,
+    0.576478008199711142954E-02,
+    0.579403701652197628421E-02,
+    0.582300929311348057702E-02,
+    0.585169437382850155033E-02,
+    0.588008975062788803205E-02,
+    0.590819294541511788161E-02,
+    0.593600151007459827614E-02,
+    0.596351302650963502011E-02,
+    0.599072510668009471472E-02,
+    0.601763539263978131522E-02,
+    0.604424155657354634589E-02,
+    0.607054130083414983949E-02,
+    0.609653235797888692923E-02,
+    0.612221249080599294931E-02,
+    0.614757949239083790214E-02,
+    0.617263118612191922727E-02,
+    0.619736542573665996342E-02,
+    0.622178009535701763157E-02,
+    0.624587310952490748541E-02,
+    0.626964241323744217671E-02,
+    0.629308598198198836688E-02,
+    0.631620182177103938227E-02,
+    0.633898796917690165912E-02,
+    0.636144249136619145314E-02,
+    0.638356348613413709795E-02,
+    0.640534908193868098342E-02,
+    0.642679743793437438922E-02,
+    0.644790674400605734710E-02,
+    0.646867522080231481688E-02,
+    0.648910111976869964292E-02,
+    0.650918272318071200827E-02,
+    0.652891834417652442012E-02,
+    0.654830632678944064054E-02,
+    0.656734504598007641819E-02,
+    0.658603290766824937794E-02,
+    0.660436834876456498276E-02,
+    0.662234983720168509457E-02,
+    0.663997587196526532519E-02,
+    0.665724498312454708217E-02,
+    0.667415573186258997654E-02,
+    0.669070671050613006584E-02,
+    0.670689654255504925648E-02,
+    0.672272388271144108036E-02,
+    0.673818741690825799086E-02,
+    0.675328586233752529078E-02,
+    0.676801796747810680683E-02,
+    0.678238251212300746082E-02,
+    0.679637830740619795480E-02,
+    0.681000419582894688374E-02,
+    0.682325905128564571420E-02,
+    0.683614177908911221841E-02,
+    0.684865131599535812903E-02,
+    0.686078663022780697951E-02,
+    0.687254672150094831613E-02,
+    0.688393062104341470995E-02,
+    0.689493739162046825872E-02,
+    0.690556612755588354803E-02,
+    0.691581595475321433825E-02,
+    0.692568603071643155621E-02,
+    0.693517554456992049848E-02,
+    0.694428371707782549438E-02,
+    0.695300980066273063177E-02,
+    0.696135307942366551493E-02,
+    0.696931286915342540213E-02,
+    0.697688851735519545845E-02,
+    0.698407940325846925786E-02,
+    0.699088493783425207545E-02,
+    0.699730456380953992594E-02,
+    0.700333775568106572820E-02,
+    0.700898401972830440494E-02,
+    0.701424289402572916425E-02,
+    0.701911394845431165171E-02,
+    0.702359678471225911031E-02,
+    0.702769103632498213858E-02,
+    0.703139636865428709508E-02,
+    0.703471247890678765907E-02,
+    0.703763909614153052319E-02,
+    0.704017598127683066242E-02,
+    0.704232292709631209597E-02,
+    0.704407975825415053266E-02,
+    0.704544633127951476780E-02,
+    0.704642253458020417748E-02,
+    0.704700828844548013730E-02,
+    0.704720354504808967346E-02,
+    0.704700828844548013730E-02,
+    0.704642253458020417748E-02,
+    0.704544633127951476780E-02,
+    0.704407975825415053266E-02,
+    0.704232292709631209597E-02,
+    0.704017598127683066242E-02,
+    0.703763909614153052319E-02,
+    0.703471247890678765907E-02,
+    0.703139636865428709508E-02,
+    0.702769103632498213858E-02,
+    0.702359678471225911031E-02,
+    0.701911394845431165171E-02,
+    0.701424289402572916425E-02,
+    0.700898401972830440494E-02,
+    0.700333775568106572820E-02,
+    0.699730456380953992594E-02,
+    0.699088493783425207545E-02,
+    0.698407940325846925786E-02,
+    0.697688851735519545845E-02,
+    0.696931286915342540213E-02,
+    0.696135307942366551493E-02,
+    0.695300980066273063177E-02,
+    0.694428371707782549438E-02,
+    0.693517554456992049848E-02,
+    0.692568603071643155621E-02,
+    0.691581595475321433825E-02,
+    0.690556612755588354803E-02,
+    0.689493739162046825872E-02,
+    0.688393062104341470995E-02,
+    0.687254672150094831613E-02,
+    0.686078663022780697951E-02,
+    0.684865131599535812903E-02,
+    0.683614177908911221841E-02,
+    0.682325905128564571420E-02,
+    0.681000419582894688374E-02,
+    0.679637830740619795480E-02,
+    0.678238251212300746082E-02,
+    0.676801796747810680683E-02,
+    0.675328586233752529078E-02,
+    0.673818741690825799086E-02,
+    0.672272388271144108036E-02,
+    0.670689654255504925648E-02,
+    0.669070671050613006584E-02,
+    0.667415573186258997654E-02,
+    0.665724498312454708217E-02,
+    0.663997587196526532519E-02,
+    0.662234983720168509457E-02,
+    0.660436834876456498276E-02,
+    0.658603290766824937794E-02,
+    0.656734504598007641819E-02,
+    0.654830632678944064054E-02,
+    0.652891834417652442012E-02,
+    0.650918272318071200827E-02,
+    0.648910111976869964292E-02,
+    0.646867522080231481688E-02,
+    0.644790674400605734710E-02,
+    0.642679743793437438922E-02,
+    0.640534908193868098342E-02,
+    0.638356348613413709795E-02,
+    0.636144249136619145314E-02,
+    0.633898796917690165912E-02,
+    0.631620182177103938227E-02,
+    0.629308598198198836688E-02,
+    0.626964241323744217671E-02,
+    0.624587310952490748541E-02,
+    0.622178009535701763157E-02,
+    0.619736542573665996342E-02,
+    0.617263118612191922727E-02,
+    0.614757949239083790214E-02,
+    0.612221249080599294931E-02,
+    0.609653235797888692923E-02,
+    0.607054130083414983949E-02,
+    0.604424155657354634589E-02,
+    0.601763539263978131522E-02,
+    0.599072510668009471472E-02,
+    0.596351302650963502011E-02,
+    0.593600151007459827614E-02,
+    0.590819294541511788161E-02,
+    0.588008975062788803205E-02,
+    0.585169437382850155033E-02,
+    0.582300929311348057702E-02,
+    0.579403701652197628421E-02,
+    0.576478008199711142954E-02,
+    0.573524105734693719020E-02,
+    0.570542254020497332312E-02,
+    0.567532715799029830087E-02,
+    0.564495756786715368885E-02,
+    0.561431645670402467678E-02,
+    0.558340654103215637610E-02,
+    0.555223056700346326850E-02,
+    0.552079131034778706457E-02,
+    0.548909157632945623482E-02,
+    0.545713419970309863995E-02,
+    0.542492204466865704951E-02,
+    0.539245800482555593606E-02,
+    0.535974500312596681161E-02,
+    0.532678599182711857974E-02,
+    0.529358395244259896547E-02,
+    0.526014189569259311205E-02,
+    0.522646286145300596306E-02,
+    0.519254991870341614863E-02,
+    0.515840616547381084096E-02,
+    0.512403472879005351831E-02,
+    0.508943876461803986674E-02,
+    0.505462145780650125058E-02,
+    0.501958602202842039909E-02,
+    0.498433569972103029914E-02,
+    0.494887376202437487201E-02,
+    0.491320350871841897367E-02,
+    0.487732826815870573054E-02,
+    0.484125139721057135214E-02,
+    0.480497628118194150483E-02,
+    0.476850633375474925263E-02,
+    0.473184499691503264714E-02,
+    0.469499574088179046532E-02,
+    0.465796206403469754658E-02,
+    0.462074749284080687482E-02,
+    0.458335558178039420335E-02,
+    0.454578991327213285488E-02,
+    0.450805409759782158001E-02,
+    0.447015177282692726900E-02,
+    0.443208660474124713206E-02,
+    0.439386228676004195260E-02,
+    0.435548253986604343679E-02,
+    0.431695111253279479928E-02,
+    0.427827178065384480959E-02,
+    0.423944834747438184434E-02,
+    0.420048464352596631772E-02,
+    0.416138452656509745764E-02,
+    0.412215188151643401528E-02,
+    0.408279062042157838350E-02,
+    0.404330468239442998549E-02,
+    0.400369803358421688562E-02,
+    0.396397466714742455513E-02,
+    0.392413860322995774660E-02,
+    0.388419388896099560998E-02,
+    0.384414459846013158917E-02,
+    0.380399483285952829161E-02,
+    0.376374872034296338241E-02,
+    0.372341041620379550870E-02,
+    0.368298410292403911967E-02,
+    0.364247399027690353194E-02,
+    0.360188431545532431869E-02,
+    0.356121934322919357659E-02,
+    0.352048336613417922682E-02,
+    0.347968070469521146972E-02,
+    0.343881570768790591876E-02,
+    0.339789275244138669739E-02,
+    0.335691624518616761342E-02,
+    0.331589062145094394706E-02,
+    0.327482034651233969564E-02,
+    0.323370991590184336368E-02,
+    0.319256385597434736790E-02,
+    0.315138672454287935858E-02,
+    0.311018311158427546158E-02,
+    0.306895764002069252174E-02,
+    0.302771496658198544480E-02,
+    0.298645978275408290247E-02,
+    0.294519681581857582284E-02,
+    0.290393082998878368175E-02,
+    0.286266662764757868253E-02,
+    0.282140905069222207923E-02,
+    0.278016298199139435045E-02,
+    0.273893334695947541201E-02,
+    0.269772511525294586667E-02,
+    0.265654330259352828314E-02,
+    0.261539297272236109225E-02,
+    0.257427923948908888092E-02,
+    0.253320726907925325750E-02,
+    0.249218228238276930060E-02,
+    0.245120955750556483923E-02,
+    0.241029443242563417382E-02,
+    0.236944230779380495146E-02,
+    0.232865864987842738864E-02,
+    0.228794899365195972378E-02,
+    0.224731894601603393082E-02,
+    0.220677418916003329194E-02,
+    0.216632048404649142727E-02,
+    0.212596367401472533045E-02,
+    0.208570968849203942640E-02,
+    0.204556454679958293446E-02,
+    0.200553436203751169944E-02,
+    0.196562534503150547732E-02,
+    0.192584380831993546204E-02,
+    0.188619617015808475394E-02,
+    0.184668895851282540913E-02,
+    0.180732881501808930079E-02,
+    0.176812249885838886701E-02,
+    0.172907689054461607168E-02,
+    0.169019899554346019117E-02,
+    0.165149594771914570655E-02,
+    0.161297501254393423070E-02,
+    0.157464359003212166189E-02,
+    0.153650921735128916170E-02,
+    0.149857957106456636214E-02,
+    0.146086246895890987689E-02,
+    0.142336587141720519900E-02,
+    0.138609788229672549700E-02,
+    0.134906674928353113127E-02,
+    0.131228086370221478128E-02,
+    0.127574875977346947345E-02,
+    0.123947911332878396534E-02,
+    0.120348074001265964881E-02,
+    0.116776259302858043685E-02,
+    0.113233376051597664917E-02,
+    0.109720346268191941940E-02,
+    0.106238104885340071375E-02,
+    0.102787599466367326179E-02,
+    0.993697899638760857945E-03,
+    0.959856485506936206261E-03,
+    0.926361595613111283368E-03,
+    0.893223195879324912340E-03,
+    0.860451377808527848128E-03,
+    0.828056364077226302608E-03,
+    0.796048517297550871506E-03,
+    0.764438352543882784191E-03,
+    0.733236554224767912055E-03,
+    0.702453997827572321358E-03,
+    0.672101776960108194646E-03,
+    0.642191235948505088403E-03,
+    0.612734008012225209294E-03,
+    0.583742058714979703847E-03,
+    0.555227733977307579715E-03,
+    0.527203811431658386125E-03,
+    0.499683553312800484519E-03,
+    0.472680758429262691232E-03,
+    0.446209810101403247488E-03,
+    0.420285716355361231823E-03,
+    0.394924138246873704434E-03,
+    0.370141402122251665232E-03,
+    0.345954492129903871350E-03,
+    0.322381020652862389664E-03,
+    0.299439176850911730874E-03,
+    0.277147657465187357459E-03,
+    0.255525589595236862014E-03,
+    0.234592462123925204879E-03,
+    0.214368090034216937149E-03,
+    0.194872642236641146532E-03,
+    0.176126765545083195474E-03,
+    0.158151830411132242924E-03,
+    0.140970302204104791413E-03,
+    0.124606200241498368482E-03,
+    0.109085545645741522051E-03,
+    0.944366322532705527066E-04,
+    0.806899228014035293851E-04,
+    0.678774554733972416227E-04,
+    0.560319507856164252140E-04,
+    0.451863674126296143105E-04,
+    0.353751372055189588628E-04,
+    0.266376412339000901358E-04,
+    0.190213681905875816679E-04,
+    0.125792781889592743525E-04,
+    0.736624069102321668857E-05,
+    0.345456507169149134898E-05,
+    0.945715933950007048827E-06,
+  };
+
+  if ( n == 1 )
+  {
+    webbur::r8vec_copy ( n, w_001, w );
+  }
+  else if ( n == 3 )
+  {
+    webbur::r8vec_copy ( n, w_003, w );
+  }
+  else if ( n == 7 )
+  {
+    webbur::r8vec_copy ( n, w_007, w );
+  }
+  else if ( n == 15 )
+  {
+    webbur::r8vec_copy ( n, w_015, w );
+  }
+  else if ( n == 31 )
+  {
+    webbur::r8vec_copy ( n, w_031, w );
+  }
+  else if ( n == 63 )
+  {
+    webbur::r8vec_copy ( n, w_063, w );
+  }
+  else if ( n == 127 )
+  {
+    webbur::r8vec_copy ( n, w_127, w );
+  }
+  else if ( n == 255 )
+  {
+    webbur::r8vec_copy ( n, w_255, w );
+  }
+  else if ( n == 511 )
+  {
+    webbur::r8vec_copy ( n, w_511, w );
+  }
+  else
+  {
+    std::cerr << "\n";
+    std::cerr << "PATTERSON_LOOKUP_WEIGHTS - Fatal error!\n";
+    std::cerr << "  Unexpected value of N = " << n << ".\n";
+    std::exit ( 1 );
+  }
+  return;
+}
+//****************************************************************************80
+
+void patterson_lookup_weights_np ( int n, int np, double p[], double w[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    PATTERSON_LOOKUP_WEIGHTS_NP looks up Patterson quadrature weights.
+//
+//  Discussion:
+//
+//    This is a parameterized-interface wrapper: the NP parameter values
+//    in P are accepted for uniformity with other *_NP routines but are
+//    not used.  All work is delegated to PATTERSON_LOOKUP_WEIGHTS.
+//
+//    The allowed orders are 1, 3, 7, 15, 31, 63, 127, 255 and 511.
+//
+//    The weights are positive, symmetric, and should sum to 2.
+//
+//    The user must preallocate space for the output array W.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    25 April 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Milton Abramowitz, Irene Stegun,
+//    Handbook of Mathematical Functions,
+//    National Bureau of Standards, 1964,
+//    ISBN: 0-486-61272-4,
+//    LC: QA47.A34.
+//
+//    Arthur Stroud, Don Secrest,
+//    Gaussian Quadrature Formulas,
+//    Prentice Hall, 1966,
+//    LC: QA299.4G3S7.
+//
+//  Parameters:
+//
+//    Input, int N, the order.
+//    Legal values are 1, 3, 7, 15, 31, 63, 127, 255 or 511.
+//
+//    Input, int NP, the number of parameters.
+//
+//    Input, double P[NP], parameters which are not needed by this function.
+//
+//    Output, double W[N], the weights.
+//
+{
+  patterson_lookup_weights ( n, w );
+}
+//****************************************************************************80
+
+int point_radial_tol_unique_count ( int m, int n, double a[], double tol,
+  int *seed )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    POINT_RADIAL_TOL_UNIQUE_COUNT counts the tolerably unique points.
+//
+//  Discussion:
+//
+//    The input data is an M x N array A, representing the M-dimensional
+//    coordinates of N points.
+//
+//    The output is the number of tolerably unique points in the list.
+//
+//    This program performs the same task as POINT_TOL_UNIQUE_COUNT.
+//    But that program is guaranteed to use N^2 comparisons.
+//
+//    It is hoped that this function, on the other hand, will tend
+//    to use O(N) comparisons after an O(NLog(N)) sort.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    24 July 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the number of rows.
+//
+//    Input, int N, the number of columns.
+//
+//    Input, double A[M*N], the array of N columns of data.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Input/output, int *SEED, a seed for the random
+//    number generator.
+//
+//    Output, int POINT_RADIAL_TOL_UNIQUE_COUNT, the number of tolerably
+//    unique points.
+//
+{
+  double dist;
+  int hi;
+  int i;
+  int *indx;
+  int j;
+  int k;
+  double *r;
+  bool *unique;
+  int unique_num;
+  double *w;
+  double w_sum;
+  double *z;
+
+  if ( n <= 0 )
+  {
+    unique_num = 0;
+    return unique_num;
+  }
+//
+//  Assign a base point Z randomly in the convex hull.
+//  W is a random convex combination: nonnegative weights summing to 1.
+//
+  w = webbur::r8vec_uniform_01_new ( n, seed );
+  w_sum = webbur::r8vec_sum ( n, w );
+  for ( j = 0; j < n; j++ )
+  {
+    w[j] = w[j] / w_sum;
+  }
+
+  z = new double[m];
+  for ( i = 0; i < m; i++ )
+  {
+    z[i] = 0.0;
+    for ( j = 0; j < n; j++ )
+    {
+      z[i] = z[i] + a[i+j*m] * w[j];
+    }
+  }
+//
+//  Compute the radial distance R of each point to Z.
+//
+  r = new double[n];
+
+  for ( j = 0; j < n; j++ )
+  {
+    r[j] = 0.0;
+    for ( i = 0; i < m; i++ )
+    {
+      r[j] = r[j] + std::pow ( a[i+j*m] - z[i], 2 );
+    }
+    r[j] = std::sqrt ( r[j] );
+  }
+//
+//  Implicitly sort the R array: INDX[I] is the index of the I-th smallest R.
+//
+  indx = webbur::r8vec_sort_heap_index_a_new ( n, r );
+//
+//  To determine if a point I is tolerably unique, we only have to check
+//  whether it is distinct from all points J such that R(I) <= R(J) <= R(I)+TOL.
+//
+  unique_num = 0;
+
+  unique = new bool[n];
+  for ( i = 0; i < n; i++ )
+  {
+    unique[i] = true;
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    if ( unique[indx[i]] )
+    {
+//
+//  Point INDX(I) is unique, in that no earlier point is near it.
+//
+      unique_num = unique_num + 1;
+//
+//  Look for later points which are close to point INDX(I)
+//  in terms of R.
+//
+      hi = i;
+
+      while ( hi < n - 1 )
+      {
+        if ( r[indx[i]] + tol < r[indx[hi+1]] )
+        {
+          break;
+        }
+        hi = hi + 1;
+      }
+//
+//  Points INDX(I+1) through INDX(HI) have an R value close to
+//  point INDX(I).  Are they truly close to point INDX(I)?
+//
+      for ( j = i + 1; j <= hi; j++ )
+      {
+        if ( unique[indx[j]] )
+        {
+          dist = 0.0;
+          for ( k = 0; k < m; k++ )
+          {
+            dist = dist + std::pow ( a[k+indx[i]*m] - a[k+indx[j]*m], 2 );
+          }
+          dist = std::sqrt ( dist );
+
+          if ( dist <= tol )
+          {
+            unique[indx[j]] = false;
+          }
+        }
+      }
+    }
+  }
+
+  delete [] indx;
+  delete [] r;
+  delete [] unique;
+  delete [] w;
+  delete [] z;
+
+  return unique_num;
+}
+//****************************************************************************80
+
+void point_radial_tol_unique_count_inc1 ( int m, int n1, double a1[],
+  double tol, int *seed, double z[], double r1[], int indx1[], bool unique1[],
+  int *unique_num1 )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    POINT_RADIAL_TOL_UNIQUE_COUNT_INC1 counts the tolerably unique points.
+//
+//  Discussion:
+//
+//    The input data is an M x N1 array A1 of N1 "permanent" points.
+//
+//    This is a two step version of POINT_RADIAL_TOL_UNIQUE_COUNT_INC.
+//
+//    This means that we want to identify the tolerably unique points
+//    among the permanent points before processing the temporary points.
+//
+//    If many sets of temporary data are considered, this function will
+//    do a lot of unnecessary work resorting the permanent data; it would
+//    be possible to avoid repetitions of that work at the expense of saving
+//    various work vectors.  This function accepts the overhead of the
+//    repeated calculations for the benefit of only having to "remember"
+//    the number of unique points discovered.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    01 October 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the number of rows.
+//
+//    Input, int N1, the number of permanent points.
+//
+//    Input, double A1[M*N1], the permanent points.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Input/output, int *SEED, a seed for the random
+//    number generator.
+//
+//    Output, double Z[M], a random base vector used to
+//    linearly sort the data.
+//
+//    Output, double R1[N1], the scalar values assigned to
+//    the data for sorting.
+//
+//    Output, int INDX1[N1], the ascending sort index
+//    for A1.
+//
+//    Output, bool UNIQUE1[N1], is TRUE for each unique permanent point.
+//
+//    Output, int *UNIQUE_NUM1, the number of tolerably
+//    unique permanent points.
+//
+{
+  double dist;
+  int hi;
+  int i;
+  int j1;
+  int k1;
+  double *w;
+  double w_sum;
+//
+//  Assign a base point Z randomly in the convex hull of the permanent points.
+//  W is a random convex combination: nonnegative weights summing to 1.
+//
+  w = webbur::r8vec_uniform_01_new ( n1, seed );
+  w_sum = webbur::r8vec_sum ( n1, w );
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    w[j1] = w[j1] / w_sum;
+  }
+  for ( i = 0; i < m; i++ )
+  {
+    z[i] = 0.0;
+    for ( j1 = 0; j1 < n1; j1++ )
+    {
+      z[i] = z[i] + a1[i+j1*m] * w[j1];
+    }
+  }
+//
+//  Initialize the permanent point data: R1 is the distance of each point to Z.
+//
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    r1[j1] = 0.0;
+    for ( i = 0; i < m; i++ )
+    {
+      r1[j1] = r1[j1] + std::pow ( a1[i+j1*m] - z[i], 2 );
+    }
+    r1[j1] = std::sqrt ( r1[j1] );
+  }
+  webbur::r8vec_sort_heap_index_a ( n1, r1, indx1 );
+
+  *unique_num1 = 0;
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    unique1[j1] = true;
+  }
+//
+//  STEP 1:
+//  Compare PERMANENT POINTS to PERMANENT POINTS.
+//
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    if ( unique1[indx1[j1]] )
+    {
+      *unique_num1 = *unique_num1 + 1;
+//
+//  Find the range of later points whose R values are within TOL.
+//
+      hi = j1;
+
+      while ( hi < n1 - 1 )
+      {
+        if ( r1[indx1[j1]] + tol < r1[indx1[hi+1]] )
+        {
+          break;
+        }
+        hi = hi + 1;
+      }
+//
+//  Mark as non-unique any of those points that are truly within TOL.
+//
+      for ( k1 = j1 + 1; k1 <= hi; k1++ )
+      {
+        if ( unique1[indx1[k1]] )
+        {
+          dist = 0.0;
+          for ( i = 0; i < m; i++ )
+          {
+            dist = dist + std::pow ( a1[i+indx1[j1]*m] - a1[i+indx1[k1]*m], 2 );
+          }
+          dist = std::sqrt ( dist );
+
+          if ( dist <= tol )
+          {
+            unique1[indx1[k1]] = false;
+          }
+        }
+      }
+    }
+  }
+
+  delete [] w;
+
+  return;
+}
+//****************************************************************************80
+
+void point_radial_tol_unique_count_inc2 ( int m, int n1, double a1[], int n2,
+  double a2[], double tol, double z[], double r1[], int indx1[], bool unique1[],
+  int *unique_num2 )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    POINT_RADIAL_TOL_UNIQUE_COUNT_INC2 counts the tolerably unique points.
+//
+//  Discussion:
+//
+//    The input data includes an M x N1 array A1 and an M x N2 array A2,
+//    representing the M-dimensional coordinates of a set of N1
+//    "permanent" points and N2 "temporary" points.
+//
+//    This is an "incremental" version of POINT_RADIAL_TOL_UNIQUE_COUNT.
+//
+//    This means that we want to identify the tolerably unique points
+//    among the permanent points before processing the temporary points.
+//
+//    If many sets of temporary data are considered, this function will
+//    do a lot of unnecessary work resorting the permanent data; it would
+//    be possible to avoid repetitions of that work at the expense of saving
+//    various work vectors.  This function accepts the overhead of the
+//    repeated calculations for the benefit of only having to "remember"
+//    the number of unique points discovered.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    01 October 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the number of rows.
+//
+//    Input, int N1, the number of permanent points.
+//
+//    Input, double A1[M*N1], the permanent points.
+//
+//    Input, int N2, the number of temporary points.
+//
+//    Input, double A2[M*N2], the temporary points.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Input, double Z[M], a random base vector used to
+//    linearly sort the data.
+//
+//    Input, double R1[N1], the scalar values assigned to
+//    the data for sorting.
+//
+//    Input, int INDX1[N1], the ascending sort index
+//    for A1.
+//
+//    Input, bool UNIQUE1[N1], is TRUE for each unique permanent point.
+//
+//    Output, int *UNIQUE_NUM2, the number of additional
+//    tolerably unique points if the temporary points are included.
+//
+{
+  double dist;
+  int hi;
+  int i;
+  int *indx2;
+  int j1;
+  int j2;
+  int j2_hi;
+  int j2_lo;
+  int k2;
+  double r_hi;
+  double r_lo;
+  double *r2;
+  bool *unique2;
+//
+//  Initialize the temporary point data: R2 is the distance of each point to Z.
+//
+  r2 = new double[n2];
+  for ( j2 = 0; j2 < n2; j2++ )
+  {
+    r2[j2] = 0.0;
+    for ( i = 0; i < m; i++ )
+    {
+      r2[j2] = r2[j2] + std::pow ( a2[i+j2*m] - z[i], 2 );
+    }
+    r2[j2] = std::sqrt ( r2[j2] );
+  }
+
+  indx2 = new int[n2];
+  webbur::r8vec_sort_heap_index_a ( n2, r2, indx2 );
+
+  unique2 = new bool[n2];
+  for ( j2 = 0; j2 < n2; j2++ )
+  {
+    unique2[j2] = true;
+  }
+
+  *unique_num2 = 0;
+//
+//  STEP 2:
+//  Use PERMANENT points to eliminate TEMPORARY points.
+//
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    if ( unique1[indx1[j1]] )
+    {
+      r_lo = r1[indx1[j1]] - tol;
+      r_hi = r1[indx1[j1]] + tol;
+
+      webbur::r8vec_index_sorted_range ( n2, r2, indx2, r_lo, r_hi,
+        &j2_lo, &j2_hi );
+
+      for ( j2 = j2_lo; j2 <= j2_hi; j2++ )
+      {
+        if ( unique2[indx2[j2]] )
+        {
+          dist = 0.0;
+          for ( i = 0; i < m; i++ )
+          {
+            dist = dist + std::pow ( a1[i+indx1[j1]*m]
+                                   - a2[i+indx2[j2]*m], 2 );
+          }
+          dist = std::sqrt ( dist );
+          if ( dist <= tol )
+          {
+            unique2[indx2[j2]] = false;
+          }
+        }
+      }
+    }
+  }
+//
+//  STEP 3:
+//  Use TEMPORARY points to eliminate TEMPORARY points.
+//
+  for ( j2 = 0; j2 < n2; j2++ )
+  {
+    if ( unique2[indx2[j2]] )
+    {
+      *unique_num2 = *unique_num2 + 1;
+//
+//  Find the range of later temporary points whose R values are within TOL.
+//
+      hi = j2;
+
+      while ( hi < n2 - 1 )
+      {
+        if ( r2[indx2[j2]] + tol < r2[indx2[hi+1]] )
+        {
+          break;
+        }
+        hi = hi + 1;
+      }
+
+      for ( k2 = j2 + 1; k2 <= hi; k2++ )
+      {
+        if ( unique2[indx2[k2]] )
+        {
+          dist = 0.0;
+          for ( i = 0; i < m; i++ )
+          {
+            dist = dist + std::pow ( a2[i+indx2[j2]*m] - a2[i+indx2[k2]*m], 2 );
+          }
+          dist = std::sqrt ( dist );
+
+          if ( dist <= tol )
+          {
+            unique2[indx2[k2]] = false;
+          }
+        }
+      }
+    }
+  }
+  delete [] indx2;
+  delete [] r2;
+  delete [] unique2;
+
+  return;
+}
+//****************************************************************************80
+
+int point_radial_tol_unique_index ( int m, int n, double a[], double tol,
+  int *seed, int undx[], int xdnu[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    POINT_RADIAL_TOL_UNIQUE_INDEX indexes the tolerably unique points.
+//
+//  Discussion:
+//
+//    The input data is an M x N array A, representing the M-dimensional
+//    coordinates of N points.
+//
+//    The output is:
+//    * the number of tolerably unique points in the list;
+//    * the index, in the list of unique items, of the representatives
+//      of each point;
+//    * the index, in A, of the tolerably unique representatives.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    28 July 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the number of rows.
+//
+//    Input, int N, the number of columns.
+//
+//    Input, double A[M*N], the array of N columns of data.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Input/output, int *SEED, a seed for the random
+//    number generator.
+//
+//    Output, int UNDX[UNIQUE_NUM], the index, in A, of the
+//    tolerably unique points.
+//
+//    Output, int XDNU[N], the index, in UNDX, of the
+//    tolerably unique point that "represents" this point.
+//
+//    Output, int POINT_RADIAL_TOL_UNIQUE_INDEX, the number of tolerably
+//    unique points.
+//
+{
+  double dist;
+  int hi;
+  int i;
+  int *indx;
+  int j;
+  int k;
+  double *r;
+  bool *unique;
+  int unique_num;
+  double *w;
+  double w_sum;
+  double *z;
+
+  if ( n <= 0 )
+  {
+    unique_num = 0;
+    return unique_num;
+  }
+//
+//  Assign a base point Z randomly in the convex hull.
+//  W is a random convex combination: nonnegative weights summing to 1.
+//
+  w = webbur::r8vec_uniform_01_new ( n, seed );
+  w_sum = webbur::r8vec_sum ( n, w );
+  for ( j = 0; j < n; j++ )
+  {
+    w[j] = w[j] / w_sum;
+  }
+
+  z = new double[m];
+  for ( i = 0; i < m; i++ )
+  {
+    z[i] = 0.0;
+    for ( j = 0; j < n; j++ )
+    {
+      z[i] = z[i] + a[i+j*m] * w[j];
+    }
+  }
+//
+//  Compute the radial distance R of each point to Z.
+//
+  r = new double[n];
+
+  for ( j = 0; j < n; j++ )
+  {
+    r[j] = 0.0;
+    for ( i = 0; i < m; i++ )
+    {
+      r[j] = r[j] + std::pow ( a[i+j*m] - z[i], 2 );
+    }
+    r[j] = std::sqrt ( r[j] );
+  }
+//
+//  Implicitly sort the R array: INDX[I] is the index of the I-th smallest R.
+//
+  indx = webbur::r8vec_sort_heap_index_a_new ( n, r );
+//
+//  To determine if a point I is tolerably unique, we only have to check
+//  whether it is distinct from all points J such that R(I) <= R(J) <= R(I)+TOL.
+//
+  unique_num = 0;
+
+  unique = new bool[n];
+  for ( i = 0; i < n; i++ )
+  {
+    unique[i] = true;
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    if ( unique[indx[i]] )
+    {
+//
+//  Point INDX(I) is unique, in that no earlier point is near it.
+//  It becomes its own representative.
+//
+      xdnu[indx[i]] = unique_num;
+      undx[unique_num] = indx[i];
+      unique_num = unique_num + 1;
+//
+//  Look for later points which are close to point INDX(I)
+//  in terms of R.
+//
+      hi = i;
+
+      while ( hi < n - 1 )
+      {
+        if ( r[indx[i]] + tol < r[indx[hi+1]] )
+        {
+          break;
+        }
+        hi = hi + 1;
+      }
+//
+//  Points INDX(I+1) through INDX(HI) have an R value close to
+//  point INDX(I).  Are they truly close to point INDX(I)?
+//
+      for ( j = i + 1; j <= hi; j++ )
+      {
+        if ( unique[indx[j]] )
+        {
+          dist = 0.0;
+          for ( k = 0; k < m; k++ )
+          {
+            dist = dist + std::pow ( a[k+indx[i]*m] - a[k+indx[j]*m], 2 );
+          }
+          dist = std::sqrt ( dist );
+
+          if ( dist <= tol )
+          {
+            unique[indx[j]] = false;
+            xdnu[indx[j]] = xdnu[indx[i]];
+          }
+        }
+      }
+    }
+  }
+
+  delete [] indx;
+  delete [] r;
+  delete [] unique;
+  delete [] w;
+  delete [] z;
+
+  return unique_num;
+}
+//****************************************************************************80
+
+void point_radial_tol_unique_index_inc1 ( int m, int n1, double a1[],
+  double tol, int *seed, double z[], double r1[], int indx1[], bool unique1[],
+  int *unique_num1, int undx1[], int xdnu1[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    POINT_RADIAL_TOL_UNIQUE_INDEX_INC1 indexes the tolerably unique points.
+//
+//  Discussion:
+//
+//    The input data includes an M x N1 array A1 of
+//    "permanent" points.
+//
+//    This is a two step version of POINT_RADIAL_TOL_UNIQUE_INDEX_INC.
+//
+//    The output is:
+//    * the number of tolerably unique points in the list;
+//    * the index, in the list of unique items, of the representatives
+//      of each point;
+//    * the index, in A1, of the tolerably unique representatives.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    02 October 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the number of rows.
+//
+//    Input, int N1, the number of permanent points.
+//
+//    Input, double A1[M*N1], the permanent points.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Input/output, int *SEED, a seed for the random
+//    number generator.
+//
+//    Output, double Z[M], a random base vector used to
+//    linearly sort the data.
+//
+//    Output, double R1[N1], the scalar values assigned to
+//    the data for sorting.
+//
+//    Output, int INDX1[N1], the ascending sort index for A1.
+//
+//    Output, bool UNIQUE1[N1], is TRUE for unique permanent points.
+//
+//    Output, int *UNIQUE_NUM1, the number of tolerably unique points
+//    with just the permanent points.
+//
+//    Output, int UNDX1[UNIQUE_NUM1], the index, in A1, of the tolerably
+//    unique points.
+//
+//    Output, int XDNU1[N1], the index, in UNDX1, of the tolerably unique
+//    point that "represents" this point.
+//
+{
+  double dist;
+  int hi;
+  int i;
+  int j1;
+  int k1;
+  double *w;
+  double w_sum;
+//
+//  Assign a base point Z randomly in the convex hull of the permanent points.
+//  W is a random convex combination: nonnegative weights summing to 1.
+//
+  w = webbur::r8vec_uniform_01_new ( n1, seed );
+  w_sum = webbur::r8vec_sum ( n1, w );
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    w[j1] = w[j1] / w_sum;
+  }
+
+  for ( i = 0; i < m; i++ )
+  {
+    z[i] = 0.0;
+    for ( j1 = 0; j1 < n1; j1++ )
+    {
+      z[i] = z[i] + a1[i+j1*m] * w[j1];
+    }
+  }
+//
+//  Initialize the permanent point data: R1 is the distance of each point to Z.
+//
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    r1[j1] = 0.0;
+    for ( i = 0; i < m; i++ )
+    {
+      r1[j1] = r1[j1] + std::pow ( a1[i+j1*m] - z[i], 2 );
+    }
+    r1[j1] = std::sqrt ( r1[j1] );
+  }
+  webbur::r8vec_sort_heap_index_a ( n1, r1, indx1 );
+
+  *unique_num1 = 0;
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    unique1[j1] = true;
+  }
+//
+//  STEP 1:
+//  Compare PERMANENT POINTS to PERMANENT POINTS.
+//
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    if ( unique1[indx1[j1]] )
+    {
+      xdnu1[indx1[j1]] = *unique_num1;
+      undx1[*unique_num1] = indx1[j1];
+      *unique_num1 = *unique_num1 + 1;
+//
+//  Find the range of later points whose R values are within TOL.
+//
+      hi = j1;
+
+      while ( hi < n1 - 1 )
+      {
+        if ( r1[indx1[j1]] + tol < r1[indx1[hi+1]] )
+        {
+          break;
+        }
+        hi = hi + 1;
+      }
+//
+//  Points truly within TOL are represented by INDX1(J1).
+//
+      for ( k1 = j1 + 1; k1 <= hi; k1++ )
+      {
+        if ( unique1[indx1[k1]] )
+        {
+          dist = 0.0;
+          for ( i = 0; i < m; i++ )
+          {
+            dist = dist + std::pow ( a1[i+indx1[j1]*m] - a1[i+indx1[k1]*m], 2 );
+          }
+          dist = std::sqrt ( dist );
+
+          if ( dist <= tol )
+          {
+            unique1[indx1[k1]] = false;
+            xdnu1[indx1[k1]] = xdnu1[indx1[j1]];
+          }
+        }
+      }
+    }
+  }
+
+  delete [] w;
+
+  return;
+}
+//****************************************************************************80
+
+void point_radial_tol_unique_index_inc2 ( int m, int n1, double a1[], int n2,
+  double a2[], double tol, double z[], double r1[], int indx1[], bool unique1[],
+  int unique_num1, int undx1[], int xdnu1[], double r2[],
+  int indx2[], bool unique2[], int *unique_num2, int undx2[], int xdnu2[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    POINT_RADIAL_TOL_UNIQUE_INDEX_INC2 indexes unique temporary points.
+//
+//  Discussion:
+//
+//    The input data includes an M x N1 array A1 and an M x N2 array A2,
+//    representing the M-dimensional coordinates of a set of N1
+//    "permanent" points and N2 "temporary" points.
+//
+//    For notation, we use "A" to describe the M x (N1+N2) array that would be
+//    formed by starting with A1 and appending A2.
+//
+//    The output is:
+//    * the number of tolerably unique points in the list;
+//    * the index, in the list of unique items, of the representatives
+//      of each point;
+//    * the index, in A, of the tolerably unique representatives.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    08 October 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the number of rows.
+//
+//    Input, int N1, the number of permanent points.
+//
+//    Input, double A1[M*N1], the permanent points.
+//
+//    Input, int N2, the number of temporary points.
+//
+//    Input, double A2[M*N2], the temporary points.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Input, double Z[M], a random base vector used to
+//    linearly sort the data.
+//
+//    Input, double R1[N1], the scalar values assigned to
+//    A1 for sorting.
+//
+//    Input, int INDX1[N1], the ascending sort index for A1.
+//
+//    Input, bool UNIQUE1[N1], is TRUE for unique permanent points.
+//
+//    Input, int UNIQUE_NUM1, the number of tolerably unique permanent points.
+//
+//    Input, int UNDX1[UNIQUE_NUM1],
+//    the index in A1 of the tolerably unique permanent points.
+//
+//    Input, int XDNU1[N1], the index in UNDX1
+//    of the tolerably unique permanent point that "represents" this point.
+//
+//    Output, double R2[N2], the scalar values assigned to
+//    A2 for sorting.
+//
+//    Output, int INDX2[N2], the ascending sort index for A2.
+//
+//    Output, bool UNIQUE2[N2], is TRUE for unique temporary points.
+//
+//    Output, int *UNIQUE_NUM2, the number
+//    of tolerably unique temporary points.
+//
+//    Output, int UNDX2[UNIQUE_NUM2],
+//    the index in A2 of the tolerably unique points, incremented by N1.
+//
+//    Output, int XDNU2[N2], the index, in UNDX1
+//    or UNDX2, of the tolerably unique point that "represents" this
+//    temporary point.  If the value represents an index in UNDX2, this
+//    can be inferred by the fact that its value is greater than or
+//    equal to UNIQUE_NUM1.  To reference UNDX2, the value should then be
+//    decremented by UNIQUE_NUM1.
+//
+{
+  double dist;
+  int hi;
+  int i;
+  int j1;
+  int j2;
+  int j2_hi;
+  int j2_lo;
+  int k2;
+  double r_hi;
+  double r_lo;
+//
+//  Initialize the temporary point data: R2 is the distance of each point to Z.
+//
+  for ( j2 = 0; j2 < n2; j2++ )
+  {
+    r2[j2] = 0.0;
+    for ( i = 0; i < m; i++ )
+    {
+      r2[j2] = r2[j2] + std::pow ( a2[i+j2*m] - z[i], 2 );
+    }
+    r2[j2] = std::sqrt ( r2[j2] );
+  }
+
+  webbur::r8vec_sort_heap_index_a ( n2, r2, indx2 );
+
+  for ( j2 = 0; j2 < n2; j2++ )
+  {
+    unique2[j2] = true;
+  }
+
+  *unique_num2 = 0;
+//
+//  STEP 2:
+//  Use PERMANENT points to eliminate TEMPORARY points.
+//
+  for ( j1 = 0; j1 < n1; j1++ )
+  {
+    if ( unique1[indx1[j1]] )
+    {
+      r_lo = r1[indx1[j1]] - tol;
+      r_hi = r1[indx1[j1]] + tol;
+
+      webbur::r8vec_index_sorted_range ( n2, r2, indx2, r_lo, r_hi,
+        &j2_lo, &j2_hi );
+
+      for ( j2 = j2_lo; j2 <= j2_hi; j2++ )
+      {
+        if ( unique2[indx2[j2]] )
+        {
+          dist = 0.0;
+          for ( i = 0; i < m; i++ )
+          {
+            dist = dist + std::pow ( a1[i+indx1[j1]*m]
+                                   - a2[i+indx2[j2]*m], 2 );
+          }
+          dist = std::sqrt ( dist );
+          if ( dist <= tol )
+          {
+            unique2[indx2[j2]] = false;
+            xdnu2[indx2[j2]] = xdnu1[indx1[j1]];
+          }
+        }
+      }
+    }
+  }
+//
+//  STEP 3:
+//  Use TEMPORARY points to eliminate TEMPORARY points.
+//
+  for ( j2 = 0; j2 < n2; j2++ )
+  {
+    if ( unique2[indx2[j2]] )
+    {
+      xdnu2[indx2[j2]] = unique_num1 + *unique_num2;
+      undx2[*unique_num2] = indx2[j2] + n1;
+      *unique_num2 = *unique_num2 + 1;
+//
+//  Find the range of later temporary points whose R values are within TOL.
+//
+      hi = j2;
+
+      while ( hi < n2 - 1 )
+      {
+        if ( r2[indx2[j2]] + tol < r2[indx2[hi+1]] )
+        {
+          break;
+        }
+        hi = hi + 1;
+      }
+
+      for ( k2 = j2 + 1; k2 <= hi; k2++ )
+      {
+        if ( unique2[indx2[k2]] )
+        {
+          dist = 0.0;
+          for ( i = 0; i < m; i++ )
+          {
+            dist = dist + std::pow ( a2[i+indx2[j2]*m] - a2[i+indx2[k2]*m], 2 );
+          }
+          dist = std::sqrt ( dist );
+
+          if ( dist <= tol )
+          {
+            unique2[indx2[k2]] = false;
+            xdnu2[indx2[k2]] = xdnu2[indx2[j2]];
+          }
+        }
+      }
+    }
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void point_radial_tol_unique_index_inc3 ( int m, int n1, double a1[],
+  double r1[], int indx1[], bool unique1[], int unique_num1, int undx1[],
+  int xdnu1[], int n2, double a2[], double r2[], int indx2[], bool unique2[],
+  int unique_num2, int undx2[], int xdnu2[], int *n3, double a3[], double r3[],
+  int indx3[], bool unique3[], int *unique_num3, int undx3[], int xdnu3[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    POINT_RADIAL_TOL_UNIQUE_INDEX_INC3 merges index data.
+//
+//  Discussion:
+//
+//    This function may be called after *INDEX_INC1 has created index
+//    information for the permanent data, and *INDEX_INC2 has created
+//    augmenting information for a set of temporary data which now is
+//    to be merged with the permanent data.
+//
+//    The function merges the data and index information to create a
+//    new "permanent" data set.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    08 October 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the number of rows.
+//
+//    Input, int N1, the number of permanent points.
+//
+//    Input, double A1[M*N1], the permanent points.
+//
+//    Input, double R1[N1], the scalar values assigned to
+//    the data for sorting.
+//
+//    Input, int INDX1[N1], the ascending sort index
+//    for A1.
+//
+//    Input, bool UNIQUE1[N1], is TRUE for each unique permanent point.
+//
+//    Input, int UNIQUE_NUM1, the number
+//    of tolerably unique points with just the permanent points.
+//
+//    Input, int UNDX1[UNIQUE_NUM1],
+//    the index in A1 of the tolerably unique points.
+//
+//    Input, int XDNU1[N1], the index in UNDX1
+//    of the tolerably unique point that "represents" this point.
+//
+//    Input, int N2, the number of temporary points.
+//
+//    Input, double A2[M,N2], the temporary points.
+//
+//    Input, double R2[N2], the scalar values assigned to
+//    the data for sorting.
+//
+//    Input, int INDX2[N2], the ascending sort index
+//    for A2.
+//
+//    Input, bool UNIQUE2[N2], is TRUE for each unique temporary point.
+//
+//    Input, int UNIQUE_NUM2, the number
+//    of tolerably unique temporary points.
+//
+//    Input, int UNDX2[UNIQUE_NUM2],
+//    the index in A2 of the tolerably unique points, incremented by N1 so
+//    that the values already index the concatenated array A3.
+//
+//    Input, int XDNU2[N2], the index in UNDX1 or UNDX2
+//    of the tolerably unique point that "represents" this point.
+//
+//    Output, int *N3, the number of permanent points.
+//
+//    Output, double A3[M,N3], the permanent points.
+//
+//    Output, double R3[N3], the scalar values assigned to
+//    the data for sorting.
+//
+//    Output, int INDX3[N3], the ascending sort index
+//    for A3.
+//
+//    Output, bool UNIQUE3[N3], is TRUE for each unique permanent point.
+//
+//    Output, int *UNIQUE_NUM3, the number
+//    of tolerably unique points.
+//
+//    Output, int UNDX3[UNIQUE_NUM3],
+//    the index in A3 of the tolerably unique points.
+//
+//    Output, int XDNU3[N3], the index in UNDX3
+//    of the tolerably unique point that "represents" this point.
+//
+{
+  int i;
+  int i1;
+  int i2;
+  int i3;
+  double v1;
+  double v2;
+
+  *n3 = n1 + n2;
+//
+//  A3 and R3 are the simple concatenations (A1,A2) and (R1,R2).
+//
+  for ( i1 = 0; i1 < n1; i1++ )
+  {
+    for ( i = 0; i < m; i++ )
+    {
+      a3[i+i1*m] = a1[i+i1*m];
+    }
+  }
+  for ( i2 = 0; i2 < n2; i2++ )
+  {
+    i3 = n1 + i2;
+    for ( i = 0; i < m; i++ )
+    {
+      a3[i+i3*m] = a2[i+i2*m];
+    }
+  }
+  for ( i1 = 0; i1 < n1; i1++ )
+  {
+    r3[i1]= r1[i1];
+  }
+  for ( i2 = 0; i2 < n2; i2++ )
+  {
+    i3 = n1 + i2;
+    r3[i3] = r2[i2];
+  }
+//
+//  Interleave the two INDX arrays so that INDX3 presents the entries
+//  of A3 in ascending R3 order.
+//
+  i1 = 0;
+  i2 = 0;
+
+  for ( i3 = 0; i3 < *n3; i3++ )
+  {
+    if ( i1 < n1 )
+    {
+      v1 = r1[indx1[i1]];
+    }
+    else
+    {
+      v1 = r8_huge ( );
+    }
+
+    if ( i2 < n2 )
+    {
+      v2 = r2[indx2[i2]];
+    }
+    else
+    {
+      v2 = r8_huge ( );
+    }
+
+    if ( v1 <= v2 )
+    {
+      indx3[i3] = indx1[i1];
+      i1 = i1 + 1;
+    }
+    else
+    {
+      indx3[i3] = indx2[i2] + n1;
+      i2 = i2 + 1;
+    }
+  }
+
+  *unique_num3 = unique_num1 + unique_num2;
+//
+//  UNIQUE3 is the concatenation of UNIQUE1 and UNIQUE2.
+//
+  for ( i1 = 0; i1 < n1; i1++ )
+  {
+    unique3[i1] = unique1[i1];
+  }
+  for ( i2 = 0; i2 < n2; i2++ )
+  {
+    i3 = n1 + i2;
+    unique3[i3] = unique2[i2];
+  }
+//
+//  The entries in UNDX2 were already incremented by N1 when they were
+//  created by *INDEX_INC2, so all entries in UNDX2 correctly index A3.
+//
+  for ( i1 = 0; i1 < unique_num1; i1++ )
+  {
+    undx3[i1] = undx1[i1];
+  }
+  for ( i2 = 0; i2 < unique_num2; i2++ )
+  {
+    i3 = unique_num1 + i2;
+    undx3[i3] = undx2[i2];
+  }
+//
+//  Note that the entries of XDNU2 were already incremented by UNIQUE_NUM1
+//  when they were created, so they correctly index the merged list UNDX3,
+//  not UNDX2.
+//
+  for ( i1 = 0; i1 < n1; i1++ )
+  {
+    xdnu3[i1] = xdnu1[i1];
+  }
+  for ( i2 = 0; i2 < n2; i2++ )
+  {
+    i3 = n1 + i2;
+    xdnu3[i3] = xdnu2[i2];
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void point_unique_index ( int m, int n, double a[], int unique_num, int undx[],
+  int xdnu[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    POINT_UNIQUE_INDEX indexes unique points.
+//
+//  Discussion:
+//
+//    A is an M by N array of points, stored as N columns of length M.
+//
+//    Two index vectors are produced:
+//
+//    * UNDX lists, in sorted order, the location in A of each unique column;
+//    * XDNU tells, for every column of A, which entry of UNDX represents it.
+//
+//    Only index vectors are built; the columns of A are never moved.  A
+//    could therefore be replaced by the compressed set XU(*) = A(UNDX(*)),
+//    and any original column recovered as A(I) = XU(XDNU(I)).
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 July 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the dimension of the data values.
+//
+//    Input, int N, the number of data values.
+//
+//    Input, double A[M*N], the data values.
+//
+//    Input, int UNIQUE_NUM, the number of unique values in A.
+//    This value is only required for languages in which the size of
+//    UNDX must be known in advance.
+//
+//    Output, int UNDX[UNIQUE_NUM], the UNDX vector.
+//
+//    Output, int XDNU[N], the XDNU vector.
+//
+{
+  int *indx;
+  int rep;
+  int col;
+  int row;
+  double diff;
+//
+//  Obtain an index vector that presents the columns of A in ascending order.
+//
+  indx = webbur::r8col_sort_heap_index_a ( m, n, a );
+//
+//  The first column in sorted order always opens a new unique class.
+//
+  rep = 0;
+  undx[rep] = indx[0];
+  xdnu[indx[0]] = rep;
+//
+//  Walk the remaining columns in sorted order, opening a new class
+//  whenever a column differs from the current representative.
+//
+  for ( col = 1; col < n; col++ )
+  {
+    diff = 0.0;
+    for ( row = 0; row < m; row++ )
+    {
+      diff = webbur::r8_max ( diff,
+        webbur::r8_abs ( a[row+indx[col]*m] - a[row+undx[rep]*m] ) );
+    }
+    if ( 0.0 < diff )
+    {
+      rep = rep + 1;
+      undx[rep] = indx[col];
+    }
+    xdnu[indx[col]] = rep;
+  }
+
+  delete [] indx;
+
+  return;
+}
+//****************************************************************************80
+
+void product_mixed_weight ( int dim_num, int order_1d[], int order_nd,
+  int rule[], double alpha[], double beta[], double weight_nd[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    PRODUCT_MIXED_WEIGHT computes the weights of a mixed product rule.
+//
+//  Discussion:
+//
+//    The quadrature rule is the product of 1D rules whose order and kind
+//    may differ from dimension to dimension.  For each dimension the 1D
+//    weights are computed and folded into the ND weight vector via
+//    R8VEC_DIRECT_PRODUCT2.
+//
+//    The user must preallocate space for the output array WEIGHT_ND.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    11 February 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int DIM_NUM, the spatial dimension.
+//
+//    Input, int ORDER_1D[DIM_NUM], the order of the 1D rules.
+//
+//    Input, int ORDER_ND, the order of the product rule.
+//
+//    Input, int RULE[DIM_NUM], the rule in each dimension.
+//     1, "CC",  Clenshaw Curtis, Closed Fully Nested rule.
+//     2, "F2",  Fejer Type 2, Open Fully Nested rule.
+//     3, "GP",  Gauss Patterson, Open Fully Nested rule.
+//     4, "GL",  Gauss Legendre, Open Weakly Nested rule.
+//     5, "GH",  Gauss Hermite, Open Weakly Nested rule.
+//     6, "GGH", Generalized Gauss Hermite, Open Weakly Nested rule.
+//     7, "LG",  Gauss Laguerre, Open Non Nested rule.
+//     8, "GLG", Generalized Gauss Laguerre, Open Non Nested rule.
+//     9, "GJ",  Gauss Jacobi, Open Non Nested rule.
+//    10, "GW",  Golub Welsch, (presumed) Open Non Nested rule.
+//    11, "CC_SE", Clenshaw Curtis Slow Exponential, Closed Fully Nested rule.
+//    12, "F2_SE", Fejer Type 2 Slow Exponential, Open Fully Nested rule.
+//    13, "GP_SE", Gauss Patterson Slow Exponential, Open Fully Nested rule.
+//    14, "CC_ME", Clenshaw Curtis Moderate Exponential, Closed Fully Nested rule.
+//    15, "F2_ME", Fejer Type 2 Moderate Exponential, Open Fully Nested rule.
+//    16, "GP_ME", Gauss Patterson Moderate Exponential, Open Fully Nested rule.
+//    17, "CCN", Clenshaw Curtis Nested, Linear, Closed Fully Nested rule.
+//
+//    Input, double ALPHA[DIM_NUM], BETA[DIM_NUM], parameters used for
+//    Generalized Gauss Hermite, Generalized Gauss Laguerre,
+//    and Gauss Jacobi rules.
+//
+//    Output, double WEIGHT_ND[ORDER_ND], the product rule weights.
+//
+{
+  int dim;
+  int i;
+  double *weight_1d;
+
+  for ( i = 0; i < order_nd; i++ )
+  {
+    weight_nd[i] = 1.0;
+  }
+
+  for ( dim = 0; dim < dim_num; dim++ )
+  {
+    weight_1d = new double[order_1d[dim]];
+//
+//  The slow/moderate exponential variants (11-16) reuse the weight
+//  computation of the corresponding base rule (1-3); only the growth
+//  schedule differs, which does not affect the weights for a given order.
+//
+    switch ( rule[dim] )
+    {
+      case 1:
+      case 11:
+      case 14:
+        webbur::clenshaw_curtis_compute_weights ( order_1d[dim], weight_1d );
+        break;
+      case 2:
+      case 12:
+      case 15:
+        webbur::fejer2_compute_weights ( order_1d[dim], weight_1d );
+        break;
+      case 3:
+      case 13:
+      case 16:
+        webbur::patterson_lookup_weights ( order_1d[dim], weight_1d );
+        break;
+      case 4:
+        webbur::legendre_compute_weights ( order_1d[dim], weight_1d );
+        break;
+      case 5:
+        webbur::hermite_compute_weights ( order_1d[dim], weight_1d );
+        break;
+      case 6:
+        webbur::gen_hermite_compute_weights ( order_1d[dim], alpha[dim], weight_1d );
+        break;
+      case 7:
+        webbur::laguerre_compute_weights ( order_1d[dim], weight_1d );
+        break;
+      case 8:
+        webbur::gen_laguerre_compute_weights ( order_1d[dim], alpha[dim], weight_1d );
+        break;
+      case 9:
+        webbur::jacobi_compute_weights ( order_1d[dim], alpha[dim], beta[dim], weight_1d );
+        break;
+      case 10:
+//
+//  Golub Welsch weights are user-supplied; there is nothing to compute.
+//
+        std::cerr << "\n";
+        std::cerr << "PRODUCT_MIXED_WEIGHT - Fatal error!\n";
+        std::cerr << "  Do not know how to set weights for rule 10.\n";
+        std::exit ( 1 );
+      case 17:
+        webbur::ccn_compute_weights ( order_1d[dim], weight_1d );
+        break;
+      default:
+        std::cerr << "\n";
+        std::cerr << "PRODUCT_MIXED_WEIGHT - Fatal error!\n";
+        std::cerr << "  Unexpected value of RULE[" << dim << "] = "
+             << rule[dim] << ".\n";
+        std::exit ( 1 );
+    }
+//
+//  Fold this dimension's 1D weights into the ND weight vector.
+//
+    webbur::r8vec_direct_product2 ( dim, order_1d[dim], weight_1d,
+      dim_num, order_nd, weight_nd );
+
+    delete [] weight_1d;
+  }
+  return;
+}
+//****************************************************************************80
+
+double r8_abs ( double x )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_ABS returns the absolute value of an R8.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 February 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, double X, the quantity whose absolute value is desired.
+//
+//    Output, double R8_ABS, the absolute value of X.
+//
+{
+  return ( x < 0.0 ) ? -x : x;
+}
+//****************************************************************************80
+
+double r8_ceiling ( double x )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_CEILING rounds an R8 "up" (towards +oo) to the next integer.
+//
+//  Discussion:
+//
+//    This version delegates to std::ceil.  The previous implementation
+//    cast X to int, which is undefined behavior (and wrong) whenever
+//    |X| exceeds INT_MAX; std::ceil is correct for the full double range.
+//
+//  Example:
+//
+//    X        R8_CEILING(X)
+//
+//   -1.1      -1.0
+//   -1.0      -1.0
+//   -0.9       0.0
+//   -0.1       0.0
+//    0.0       0.0
+//    0.1       1.0
+//    0.9       1.0
+//    1.0       1.0
+//    1.1       2.0
+//    2.9       3.0
+//    3.0       3.0
+//    3.14159   4.0
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    01 April 2004
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, double X, the number whose ceiling is desired.
+//
+//    Output, double R8_CEILING, the ceiling of X.
+//
+{
+  return std::ceil ( x );
+}
+//****************************************************************************80
+
+double r8_choose ( int n, int k )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_CHOOSE computes the binomial coefficient C(N,K) as an R8.
+//
+//  Discussion:
+//
+//    The value is calculated in such a way as to avoid overflow and
+//    roundoff.  The calculation is done in R8 arithmetic.
+//
+//    The formula used is:
+//
+//      C(N,K) = N! / ( K! * (N-K)! )
+//
+//    Bug fix: VALUE was previously declared as an int, which truncated
+//    the double-precision intermediate products and quotients on every
+//    loop iteration; it is now a double, as the return type requires.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    24 March 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    ML Wolfson, HV Wright,
+//    Algorithm 160:
+//    Combinatorial of M Things Taken N at a Time,
+//    Communications of the ACM,
+//    Volume 6, Number 4, April 1963, page 161.
+//
+//  Parameters:
+//
+//    Input, int N, K, the values of N and K.
+//
+//    Output, double R8_CHOOSE, the number of combinations of N
+//    things taken K at a time.
+//
+{
+  int i;
+  int mn;
+  int mx;
+  double value;
+//
+//  MN = min ( K, N - K ), MX = max ( K, N - K ).
+//
+  if ( k < n - k )
+  {
+    mn = k;
+    mx = n - k;
+  }
+  else
+  {
+    mn = n - k;
+    mx = k;
+  }
+
+  if ( mn < 0 )
+  {
+    value = 0.0;
+  }
+  else if ( mn == 0 )
+  {
+    value = 1.0;
+  }
+  else
+  {
+//
+//  After iteration I, VALUE = C(MX+I,I); each intermediate is an integer,
+//  so the double arithmetic stays exact until C(N,K) exceeds 2^53.
+//
+    value = ( double ) ( mx + 1 );
+
+    for ( i = 2; i <= mn; i++ )
+    {
+      value = ( value * ( double ) ( mx + i ) ) / ( double ) i;
+    }
+  }
+  return value;
+}
+//****************************************************************************80
+
+double r8_epsilon ( )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_EPSILON returns the R8 roundoff unit.
+//
+//  Discussion:
+//
+//    The roundoff unit is a number R which is a power of 2 with the
+//    property that, to the precision of the computer's arithmetic,
+//      1 < 1 + R
+//    but
+//      1 = ( 1 + R / 2 )
+//
+//    The value is simply 2^-52 for IEEE 754 double precision.  Returning
+//    the constant directly is what the former halving loop computed, but
+//    it avoids both the per-call loop and the risk of a wrong answer when
+//    intermediate results are kept in extended precision registers.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 February 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Output, double R8_EPSILON, the R8 round-off unit.
+//
+{
+  const double value = 2.220446049250313E-016;
+
+  return value;
+}
+//****************************************************************************80
+
+double r8_factorial ( int n )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_FACTORIAL computes the factorial of N.
+//
+//  Discussion:
+//
+//    factorial ( N ) = product ( 1 <= I <= N ) I
+//
+//    An empty product (N < 1) yields 1.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    16 January 1999
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the argument of the factorial function.
+//    If N is less than 1, the function value is returned as 1.
+//
+//    Output, double R8_FACTORIAL, the factorial function.
+//
+{
+  int factor;
+  double product;
+
+  product = 1.0;
+//
+//  Multiply the integers 1, 2, ..., N in ascending order.
+//
+  for ( factor = 1; factor <= n; factor++ )
+  {
+    product *= ( double ) ( factor );
+  }
+
+  return product;
+}
+//****************************************************************************80
+
+double r8_factorial2 ( int n )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_FACTORIAL2 computes the double factorial function.
+//
+//  Discussion:
+//
+//    FACTORIAL2( N ) = Product ( N * (N-2) * (N-4) * ... * 2 )  (N even)
+//                    = Product ( N * (N-2) * (N-4) * ... * 1 )  (N odd)
+//
+//  Example:
+//
+//     N    FACTORIAL2(N)
+//
+//     0     1
+//     1     1
+//     2     2
+//     3     3
+//     4     8
+//     5    15
+//     6    48
+//     7   105
+//     8   384
+//     9   945
+//    10  3840
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 January 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the argument of the double factorial
+//    function.  If N is less than 1, R8_FACTORIAL2 is returned as 1.0.
+//
+//    Output, double R8_FACTORIAL2, the double factorial function.
+//
+{
+  int k;
+  double product;
+
+  product = 1.0;
+//
+//  Multiply N, N-2, N-4, ... down to 2 (N even) or 1 (N odd).
+//  If N < 1 the loop body never runs and the empty product 1 is returned.
+//
+  for ( k = n; 1 < k; k = k - 2 )
+  {
+    product = product * ( double ) ( k );
+  }
+
+  return product;
+}
+//****************************************************************************80
+
+double r8_floor ( double x )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_FLOOR rounds an R8 "down" (towards -infinity) to the next integer.
+//
+//  Discussion:
+//
+//    This version delegates to std::floor.  The previous implementation
+//    cast X to int, which is undefined behavior (and wrong) whenever
+//    |X| exceeds INT_MAX; std::floor is correct for the full double range.
+//
+//  Example:
+//
+//    X        R8_FLOOR(X)
+//
+//   -1.1      -2.0
+//   -1.0      -1.0
+//   -0.9      -1.0
+//   -0.1      -1.0
+//    0.0       0.0
+//    0.1       0.0
+//    0.9       0.0
+//    1.0       1.0
+//    1.1       1.0
+//    2.9       2.0
+//    3.0       3.0
+//    3.14159   3.0
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    15 April 2007
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, double X, the number whose floor is desired.
+//
+//    Output, double R8_FLOOR, the floor of X.
+//
+{
+  return std::floor ( x );
+}
+//****************************************************************************80
+
+double r8_gamma ( double x )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_GAMMA evaluates Gamma(X) for a real argument.
+//
+//  Discussion:
+//
+//    This routine calculates the gamma function for a real argument X.
+//
+//    Computation is based on an algorithm outlined in reference 1.
+//    The program uses rational functions that approximate the gamma
+//    function to at least 20 significant decimal digits.  Coefficients
+//    for the approximation over the interval (1,2) are unpublished.
+//    Those for the approximation for 12 <= X are from reference 2.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 January 2008
+//
+//  Author:
+//
+//    Original FORTRAN77 version by William Cody, Laura Stoltz.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    William Cody,
+//    An Overview of Software Development for Special Functions,
+//    in Numerical Analysis Dundee, 1975,
+//    edited by GA Watson,
+//    Lecture Notes in Mathematics 506,
+//    Springer, 1976.
+//
+//    John Hart, Ward Cheney, Charles Lawson, Hans Maehly,
+//    Charles Mesztenyi, John Rice, Henry Thatcher,
+//    Christoph Witzgall,
+//    Computer Approximations,
+//    Wiley, 1968,
+//    LC: QA297.C64.
+//
+//  Parameters:
+//
+//    Input, double X, the argument of the function.
+//
+//    Output, double R8_GAMMA, the value of the function.
+//
+{
+//
+//  Coefficients for minimax approximation over (12, INF).
+//
+  double c[7] = {
+   -1.910444077728E-03,
+    8.4171387781295E-04,
+   -5.952379913043012E-04,
+    7.93650793500350248E-04,
+   -2.777777777777681622553E-03,
+    8.333333333333333331554247E-02,
+    5.7083835261E-03 };
+  double eps = 2.22E-16;
+  double fact;
+  int i;
+  int n;
+  double one = 1.0;
+  double p[8] = {
+  -1.71618513886549492533811E+00,
+   2.47656508055759199108314E+01,
+  -3.79804256470945635097577E+02,
+   6.29331155312818442661052E+02,
+   8.66966202790413211295064E+02,
+  -3.14512729688483675254357E+04,
+  -3.61444134186911729807069E+04,
+   6.64561438202405440627855E+04 };
+  bool parity;
+  double pi = 3.1415926535897932384626434;
+  double q[8] = {
+  -3.08402300119738975254353E+01,
+   3.15350626979604161529144E+02,
+  -1.01515636749021914166146E+03,
+  -3.10777167157231109440444E+03,
+   2.25381184209801510330112E+04,
+   4.75584627752788110767815E+03,
+  -1.34659959864969306392456E+05,
+  -1.15132259675553483497211E+05 };
+  double res;
+  double sqrtpi = 0.9189385332046727417803297;
+  double sum;
+  double twelve = 12.0;
+  double two = 2.0;
+  double value;
+  double xbig = 171.624;
+  double xden;
+  double xinf = 1.79E+308;
+  double xminin = 2.23E-308;
+  double xnum;
+  double y;
+  double y1;
+  double ysq;
+  double z;
+//
+//  PARITY records a sign flip required by the reflection step; FACT
+//  carries the reflection factor -pi/sin(pi*x) for negative arguments.
+//
+  parity = false;
+  fact = one;
+  n = 0;
+  y = x;
+//
+//  Argument is negative.
+//
+  if ( y <= 0.0 )
+  {
+    y = - x;
+    y1 = ( double ) ( int ) ( y );
+    res = y - y1;
+
+    if ( res != 0.0 )
+    {
+      if ( y1 != ( double ) ( int ) ( y1 * 0.5 ) * two )
+      {
+        parity = true;
+      }
+//
+//  Noninteger negative argument: apply the reflection formula factor.
+//
+      fact = - pi / std::sin ( pi * res );
+      y = y + one;
+    }
+    else
+    {
+//
+//  Nonpositive integer argument: Gamma has a pole here; return "infinity".
+//
+      res = xinf;
+      value = res;
+      return value;
+    }
+  }
+//
+//  Argument is positive.
+//
+  if ( y < eps )
+  {
+//
+//  Argument < EPS.
+//
+    if ( xminin <= y )
+    {
+      res = one / y;
+    }
+    else
+    {
+      res = xinf;
+      value = res;
+      return value;
+    }
+  }
+  else if ( y < twelve )
+  {
+    y1 = y;
+//
+//  0.0 < argument < 1.0.
+//
+    if ( y < one )
+    {
+      z = y;
+      y = y + one;
+    }
+//
+//  1.0 < argument < 12.0.
+//  Reduce argument if necessary.
+//
+    else
+    {
+      n = ( int ) ( y ) - 1;
+      y = y - ( double ) ( n );
+      z = y - one;
+    }
+//
+//  Evaluate approximation for 1.0 < argument < 2.0.
+//
+    xnum = 0.0;
+    xden = one;
+    for ( i = 0; i < 8; i++ )
+    {
+      xnum = ( xnum + p[i] ) * z;
+      xden = xden * z + q[i];
+    }
+    res = xnum / xden + one;
+//
+//  Adjust result for case  0.0 < argument < 1.0.
+//
+    if ( y1 < y )
+    {
+      res = res / y1;
+    }
+//
+//  Adjust result for case 2.0 < argument < 12.0.
+//
+    else if ( y < y1 )
+    {
+      for ( i = 1; i <= n; i++ )
+      {
+        res = res * y;
+        y = y + one;
+      }
+    }
+  }
+  else
+  {
+//
+//  Evaluate for 12.0 <= argument.
+//
+    if ( y <= xbig )
+    {
+//
+//  Minimax sum in 1/Y^2 combined with the leading log-Gamma terms,
+//  then exponentiate.
+//
+      ysq = y * y;
+      sum = c[6];
+      for ( i = 0; i < 6; i++ )
+      {
+        sum = sum / ysq + c[i];
+      }
+      sum = sum / y - y + sqrtpi;
+      sum = sum + ( y - 0.5 ) * std::log ( y );
+      res = std::exp ( sum );
+    }
+    else
+    {
+      res = xinf;
+      value = res;
+      return value;
+    }
+  }
+//
+//  Final adjustments and return.
+//
+  if ( parity )
+  {
+    res = - res;
+  }
+
+  if ( fact != one )
+  {
+    res = fact / res;
+  }
+
+  value = res;
+
+  return value;
+}
+//****************************************************************************80
+
+double r8_huge ( )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_HUGE returns a "huge" R8.
+//
+//  Discussion:
+//
+//    The value returned is deliberately NOT the maximum representable R8:
+//    that maximum varies by machine and compiler and can misbehave when
+//    printed.  A fixed "very large" but finite value is returned instead.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    06 October 2007
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Output, double R8_HUGE, a "huge" R8 value.
+//
+{
+  return 1.0E+30;
+}
+//****************************************************************************80
+
+double r8_hyper_2f1 ( double a, double b, double c, double x )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_HYPER_2F1 evaluates the hypergeometric function 2F1(A,B,C,X).
+//
+//  Discussion:
+//
+//    A bug was corrected.  A line which read
+//      c1 = - ( - 1.0, m ) * gc / ( gam * gbm * rm );
+//    was corrected to read
+//      c1 = - std::pow ( - 1.0, m ) * gc / ( gam * gbm * rm );
+//    JVB, 05 July 2009.
+//
+//    A minor bug was corrected.  The HW variable, used in several places as
+//    the "old" value of a quantity being iteratively improved, was not
+//    being initialized.  JVB, 11 February 2008.
+//
+//    The FORTRAN77 original version of this routine is copyrighted by
+//    Shanjie Zhang and Jianming Jin.  However, they give permission to
+//    incorporate this routine into a user program provided that the copyright
+//    is acknowledged.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    05 July 2009
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Shanjie Zhang, Jianming Jin.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    Shanjie Zhang, Jianming Jin,
+//    Computation of Special Functions,
+//    Wiley, 1996,
+//    ISBN: 0-471-11963-6,
+//    LC: QA351.C45
+//
+//  Parameters:
+//
+//    Input, double A, B, C, X, the arguments of the function.
+//    C must not be equal to a nonpositive integer.
+//    X < 1.
+//
+//    Output, double R8_HYPER_2F1, the value of the function.
+//
+{
+  double a0;
+  double aa;
+  double bb;
+  double c0;
+  double c1;
+  double el = 0.5772156649015329;
+  double eps;
+  double f0;
+  double f1;
+  double g0;
+  double g1;
+  double g2;
+  double g3;
+  double ga;
+  double gabc;
+  double gam;
+  double gb;
+  double gbm;
+  double gc;
+  double gca;
+  double gcab;
+  double gcb;
+  double gm;
+  double hf;
+  double hw;
+  int j;
+  int k;
+  bool l0;
+  bool l1;
+  bool l2;
+  bool l3;
+  bool l4;
+  bool l5;
+  int m;
+  int nm;
+  double pa;
+  double pb;
+  double pi = 3.141592653589793;
+  double r;
+  double r0;
+  double r1;
+  double rm;
+  double rp;
+  double sm;
+  double sp;
+  double sp0;
+  double x1;
+
+//
+//  Classify degenerate / special argument combinations:
+//  L0: C is a negative integer (divergent);
+//  L1: X is effectively 1 while C-A-B <= 0 (divergent);
+//  L2, L3: A or B is a negative integer (the series terminates);
+//  L4, L5: C-A or C-B is a nonpositive integer.
+//
+  l0 = ( c == ( int ) ( c ) ) && ( c < 0.0 );
+  l1 = ( 1.0 - x < 1.0E-15 ) && ( c - a - b <= 0.0 );
+  l2 = ( a == ( int ) ( a ) ) && ( a < 0.0 );
+  l3 = ( b == ( int ) ( b ) ) && ( b < 0.0 );
+  l4 = ( c - a == ( int ) ( c - a ) ) && ( c - a <= 0.0 );
+  l5 = ( c - b == ( int ) ( c - b ) ) && ( c - b <= 0.0 );
+
+  if ( l0 )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8_HYPER_2F1 - Fatal error!\n";
+    std::cerr << "  The hypergeometric series is divergent.\n";
+    std::cerr << "  C is integral and negative.\n";
+    std::cerr << "  C = " << c << "\n";
+    std::exit ( 1 );
+  }
+
+  if ( l1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8_HYPER_2F1 - Fatal error!\n";
+    std::cerr << "  The hypergeometric series is divergent.\n";
+    std::cerr << "  1 - X < 0, C - A - B <= 0\n";
+    std::cerr << "  A = " << a << "\n";
+    std::cerr << "  B = " << b << "\n";
+    std::cerr << "  C = " << c << "\n";
+    std::cerr << "  X = " << x << "\n";
+    std::exit ( 1 );
+  }
+
+//
+//  Use a looser convergence tolerance near the X = 1 singularity.
+//
+  if ( 0.95 < x )
+  {
+    eps = 1.0E-08;
+  }
+  else
+  {
+    eps = 1.0E-15;
+  }
+
+  if ( x == 0.0 || a == 0.0 || b == 0.0 )
+  {
+    hf = 1.0;
+    return hf;
+  }
+  else if ( 1.0 - x == eps && 0.0 < c - a - b )
+  {
+    gc = webbur::r8_gamma ( c );
+    gcab = webbur::r8_gamma ( c - a - b );
+    gca = webbur::r8_gamma ( c - a );
+    gcb = webbur::r8_gamma ( c - b );
+    hf = gc * gcab / ( gca * gcb );
+    return hf;
+  }
+//
+//  NOTE(review): 'r8_abs' is unqualified here (and again in the 0 <= M loop
+//  below), unlike the 'webbur::' qualified calls used elsewhere in this
+//  routine — presumably both resolve to the same function; confirm the
+//  enclosing namespace.
+//
+  else if ( 1.0 + x <= eps && r8_abs ( c - a + b - 1.0 ) <= eps )
+  {
+    g0 = std::sqrt ( pi ) * std::pow ( 2.0, - a );
+    g1 = webbur::r8_gamma ( c );
+    g2 = webbur::r8_gamma ( 1.0 + a / 2.0 - b );
+    g3 = webbur::r8_gamma ( 0.5 + 0.5 * a );
+    hf = g0 * g1 / ( g2 * g3 );
+    return hf;
+  }
+//
+//  A or B a negative integer: the series is a finite polynomial in X.
+//
+  else if ( l2 || l3 )
+  {
+    if ( l2 )
+    {
+      nm = ( int ) ( webbur::r8_abs ( a ) );
+    }
+
+    if ( l3 )
+    {
+      nm = ( int ) ( webbur::r8_abs ( b ) );
+    }
+
+    hf = 1.0;
+    r = 1.0;
+
+    for ( k = 1; k <= nm; k++ )
+    {
+      r = r * ( a + k - 1.0 ) * ( b + k - 1.0 )
+        / ( k * ( c + k - 1.0 ) ) * x;
+      hf = hf + r;
+    }
+
+    return hf;
+  }
+  else if ( l4 || l5 )
+  {
+    if ( l4 )
+    {
+      nm = ( int ) ( webbur::r8_abs ( c - a ) );
+    }
+
+    if ( l5 )
+    {
+      nm = ( int ) ( webbur::r8_abs ( c - b ) );
+    }
+
+    hf = 1.0;
+    r  = 1.0;
+    for ( k = 1; k <= nm; k++ )
+    {
+      r = r * ( c - a + k - 1.0 ) * ( c - b + k - 1.0 )
+        / ( k * ( c + k - 1.0 ) ) * x;
+      hf = hf + r;
+    }
+    hf = std::pow ( 1.0 - x, c - a - b ) * hf;
+    return hf;
+  }
+
+//
+//  General case.  Save the inputs; for X < 0, switch to the transformed
+//  argument X/(X-1) (the transformation is undone at the end via the
+//  C0 = (1-X)^(-AA) factor).
+//
+  aa = a;
+  bb = b;
+  x1 = x;
+
+  if ( x < 0.0 )
+  {
+    x = x / ( x - 1.0 );
+    if ( a < c && b < a && 0.0 < b )
+    {
+      a = bb;
+      b = aa;
+    }
+    b = c - b;
+  }
+
+//
+//  Near the singularity (0.75 <= X), use the analytic continuation in
+//  powers of (1-X); otherwise sum the defining series directly.
+//
+  if ( 0.75 <= x )
+  {
+    gm = 0.0;
+
+//
+//  C-A-B an integer M: logarithmic case of the continuation formula.
+//
+    if ( webbur::r8_abs ( c - a - b - ( int ) ( c - a - b ) ) < 1.0E-15 )
+    {
+      m = ( int ) ( c - a - b );
+      ga = webbur::r8_gamma ( a );
+      gb = webbur::r8_gamma ( b );
+      gc = webbur::r8_gamma ( c );
+      gam = webbur::r8_gamma ( a + m );
+      gbm = webbur::r8_gamma ( b + m );
+
+      pa = webbur::r8_psi ( a );
+      pb = webbur::r8_psi ( b );
+
+      if ( m != 0 )
+      {
+        gm = 1.0;
+      }
+
+//
+//  GM = (|M|-1)!, RM = |M)!.
+//
+      for ( j = 1; j <= std::abs ( m ) - 1; j++ )
+      {
+        gm = gm * j;
+      }
+
+      rm = 1.0;
+      for ( j = 1; j <= std::abs ( m ); j++ )
+      {
+        rm = rm * j;
+      }
+
+//
+//  (The doubled semicolons below are harmless empty statements.)
+//
+      f0 = 1.0;
+      r0 = 1.0;;
+      r1 = 1.0;
+      sp0 = 0.0;;
+      sp = 0.0;
+
+      if ( 0 <= m )
+      {
+        c0 = gm * gc / ( gam * gbm );
+        c1 = - gc * std::pow ( x - 1.0, m ) / ( ga * gb * rm );
+
+        for ( k = 1; k <= m - 1; k++ )
+        {
+          r0 = r0 * ( a + k - 1.0 ) * ( b + k - 1.0 )
+            / ( k * ( k - m ) ) * ( 1.0 - x );
+          f0 = f0 + r0;
+        }
+
+        for ( k = 1; k <= m; k++ )
+        {
+          sp0 = sp0 + 1.0 / ( a + k - 1.0 ) + 1.0 / ( b + k - 1.0 )
+          - 1.0 / ( double ) ( k );
+        }
+
+        f1 = pa + pb + sp0 + 2.0 * el + std::log ( 1.0 - x );
+//
+//  HW remembers the previous iterate so convergence can be detected.
+//
+        hw = f1;
+
+        for ( k = 1; k <= 250; k++ )
+        {
+          sp = sp + ( 1.0 - a ) / ( k * ( a + k - 1.0 ) )
+            + ( 1.0 - b ) / ( k * ( b + k - 1.0 ) );
+
+          sm = 0.0;
+          for ( j = 1; j <= m; j++ )
+          {
+            sm = sm + ( 1.0 - a )
+              / ( ( j + k ) * ( a + j + k - 1.0 ) )
+              + 1.0 / ( b + j + k - 1.0 );
+          }
+
+          rp = pa + pb + 2.0 * el + sp + sm + std::log ( 1.0 - x );
+
+          r1 = r1 * ( a + m + k - 1.0 ) * ( b + m + k - 1.0 )
+            / ( k * ( m + k ) ) * ( 1.0 - x );
+
+          f1 = f1 + r1 * rp;
+
+          if ( r8_abs ( f1 - hw ) < r8_abs ( f1 ) * eps )
+          {
+            break;
+          }
+          hw = f1;
+        }
+        hf = f0 * c0 + f1 * c1;
+      }
+      else if ( m < 0 )
+      {
+        m = - m;
+        c0 = gm * gc / ( ga * gb * std::pow ( 1.0 - x, m ) );
+        c1 = - std::pow ( - 1.0, m ) * gc / ( gam * gbm * rm );
+
+        for ( k = 1; k <= m - 1; k++ )
+        {
+          r0 = r0 * ( a - m + k - 1.0 ) * ( b - m + k - 1.0 )
+            / ( k * ( k - m ) ) * ( 1.0 - x );
+          f0 = f0 + r0;
+        }
+
+        for ( k = 1; k <= m; k++ )
+        {
+          sp0 = sp0 + 1.0 / ( double ) ( k );
+        }
+
+        f1 = pa + pb - sp0 + 2.0 * el + std::log ( 1.0 - x );
+        hw = f1;
+
+        for ( k = 1; k <= 250; k++ )
+        {
+          sp = sp + ( 1.0 - a )
+            / ( k * ( a + k - 1.0 ) )
+            + ( 1.0 - b ) / ( k * ( b + k - 1.0 ) );
+
+          sm = 0.0;
+          for ( j = 1; j <= m; j++ )
+          {
+            sm = sm + 1.0 / ( double ) ( j + k );
+          }
+
+          rp = pa + pb + 2.0 * el + sp - sm + std::log ( 1.0 - x );
+
+          r1 = r1 * ( a + k - 1.0 ) * ( b + k - 1.0 )
+            / ( k * ( m + k ) ) * ( 1.0 - x );
+
+          f1 = f1 + r1 * rp;
+
+          if ( webbur::r8_abs ( f1 - hw ) < webbur::r8_abs ( f1 ) * eps )
+          {
+            break;
+          }
+
+          hw = f1;
+        }
+
+        hf = f0 * c0 + f1 * c1;
+      }
+    }
+//
+//  C-A-B not an integer: non-logarithmic continuation in (1-X).
+//
+    else
+    {
+      ga = webbur::r8_gamma ( a );
+      gb = webbur::r8_gamma ( b );
+      gc = webbur::r8_gamma ( c );
+      gca = webbur::r8_gamma ( c - a );
+      gcb = webbur::r8_gamma ( c - b );
+      gcab = webbur::r8_gamma ( c - a - b );
+      gabc = webbur::r8_gamma ( a + b - c );
+      c0 = gc * gcab / ( gca * gcb );
+      c1 = gc * gabc / ( ga * gb ) * std::pow ( 1.0 - x, c - a - b );
+      hf = 0.0;
+      hw = hf;
+      r0 = c0;
+      r1 = c1;
+
+      for ( k = 1; k <= 250; k++ )
+      {
+        r0 = r0 * ( a + k - 1.0 ) * ( b + k - 1.0 )
+          / ( k * ( a + b - c + k ) ) * ( 1.0 - x );
+
+        r1 = r1 * ( c - a + k - 1.0 ) * ( c - b + k - 1.0 )
+          / ( k * ( c - a - b + k ) ) * ( 1.0 - x );
+
+        hf = hf + r0 + r1;
+
+        if ( webbur::r8_abs ( hf - hw ) < webbur::r8_abs ( hf ) * eps )
+        {
+          break;
+        }
+        hw = hf;
+      }
+      hf = hf + c0 + c1;
+    }
+  }
+//
+//  X < 0.75: sum the defining Gauss series directly, after an optional
+//  Euler transformation when A, B are both between C/2 and C.
+//
+  else
+  {
+    a0 = 1.0;
+
+    if ( a < c && c < 2.0 * a && b < c && c < 2.0 * b )
+    {
+      a0 = std::pow ( 1.0 - x, c - a - b );
+      a = c - a;
+      b = c - b;
+    }
+
+    hf = 1.0;
+    hw = hf;
+    r = 1.0;
+
+    for ( k = 1; k <= 250; k++ )
+    {
+      r = r * ( a + k - 1.0 ) * ( b + k - 1.0 )
+        / ( k * ( c + k - 1.0 ) ) * x;
+
+      hf = hf + r;
+
+      if ( webbur::r8_abs ( hf - hw ) <= webbur::r8_abs ( hf ) * eps )
+      {
+        break;
+      }
+
+      hw = hf;
+    }
+    hf = a0 * hf;
+  }
+
+//
+//  Undo the X < 0 argument transformation.
+//
+  if ( x1 < 0.0 )
+  {
+    x = x1;
+    c0 = 1.0 / std::pow ( 1.0 - x, aa );
+    hf = c0 * hf;
+  }
+
+  a = aa;
+  b = bb;
+
+//
+//  K holds the iteration count of whichever series loop ran last; a large
+//  count signals slow convergence.
+//
+  if ( 120 < k )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8_HYPER_2F1 - Warning!\n";
+    std::cerr << "  A large number of iterations were needed.\n";
+    std::cerr << "  The accuracy of the results should be checked.\n";
+  }
+
+  return hf;
+}
+//****************************************************************************80
+
+double r8_max ( double x, double y )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_MAX returns the maximum of two R8's.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 August 2004
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, double X, Y, the quantities to compare.
+//
+//    Output, double R8_MAX, the maximum of X and Y.
+//
+{
+  double value;
+
+//
+//  On a tie ( X == Y ), Y is returned.
+//
+  if ( y < x )
+  {
+    value = x;
+  }
+  else
+  {
+    value = y;
+  }
+  return value;
+}
+//****************************************************************************80
+
+double r8_min ( double x, double y )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_MIN returns the minimum of two R8's.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    31 August 2004
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, double X, Y, the quantities to compare.
+//
+//    Output, double R8_MIN, the minimum of X and Y.
+//
+{
+  double value;
+
+//
+//  On a tie ( X == Y ), X is returned.
+//
+  if ( y < x )
+  {
+    value = y;
+  }
+  else
+  {
+    value = x;
+  }
+  return value;
+}
+//****************************************************************************80
+
+double r8_mop ( int i )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_MOP returns the I-th power of -1 as an R8 value.
+//
+//  Discussion:
+//
+//    An R8 is an double value.
+//
+//    "MOP" = Minus One Power: VALUE = (-1)^I, computed from the parity
+//    of I rather than by calling pow().
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    16 November 2007
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int I, the power of -1.
+//
+//    Output, double R8_MOP, the I-th power of -1.
+//
+{
+  double value;
+
+  if ( ( i % 2 ) == 0 )
+  {
+    value = 1.0;
+  }
+  else
+  {
+    value = -1.0;
+  }
+
+  return value;
+}
+//****************************************************************************80
+
+double r8_psi ( double xx )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_PSI evaluates the function Psi(X).
+//
+//  Discussion:
+//
+//    This routine evaluates the logarithmic derivative of the
+//    Gamma function,
+//
+//      PSI(X) = d/dX ( GAMMA(X) ) / GAMMA(X)
+//             = d/dX LN ( GAMMA(X) )
+//
+//    for real X, where either
+//
+//      - XMAX1 < X < - XMIN, and X is not a negative integer,
+//
+//    or
+//
+//      XMIN < X.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    09 February 2008
+//
+//  Author:
+//
+//    Original FORTRAN77 version by William Cody.
+//    C++ version by John Burkardt.
+//
+//  Reference:
+//
+//    William Cody, Anthony Strecok, Henry Thacher,
+//    Chebyshev Approximations for the Psi Function,
+//    Mathematics of Computation,
+//    Volume 27, Number 121, January 1973, pages 123-127.
+//
+//  Parameters:
+//
+//    Input, double XX, the argument of the function.
+//
+//    Output, double R8_PSI, the value of the function.
+//
+{
+  double aug;
+  double den;
+  int i;
+  int n;
+  int nq;
+  double one = 1.0;
+//
+//  P1/Q1: rational approximation coefficients for 0.5 <= X <= 3.0.
+//  P2/Q2: rational approximation coefficients for 3.0 < X (in 1/X^2).
+//
+  double p1[9] = {
+   4.5104681245762934160E-03,
+   5.4932855833000385356,
+   3.7646693175929276856E+02,
+   7.9525490849151998065E+03,
+   7.1451595818951933210E+04,
+   3.0655976301987365674E+05,
+   6.3606997788964458797E+05,
+   5.8041312783537569993E+05,
+   1.6585695029761022321E+05 };
+  double p2[7] = {
+  -2.7103228277757834192,
+  -1.5166271776896121383E+01,
+  -1.9784554148719218667E+01,
+  -8.8100958828312219821,
+  -1.4479614616899842986,
+  -7.3689600332394549911E-02,
+  -6.5135387732718171306E-21 };
+  double piov4 = 0.78539816339744830962;
+  double q1[8] = {
+   9.6141654774222358525E+01,
+   2.6287715790581193330E+03,
+   2.9862497022250277920E+04,
+   1.6206566091533671639E+05,
+   4.3487880712768329037E+05,
+   5.4256384537269993733E+05,
+   2.4242185002017985252E+05,
+   6.4155223783576225996E-08 };
+  double q2[6] = {
+   4.4992760373789365846E+01,
+   2.0240955312679931159E+02,
+   2.4736979003315290057E+02,
+   1.0742543875702278326E+02,
+   1.7463965060678569906E+01,
+   8.8427520398873480342E-01 };
+  double sgn;
+  double three = 3.0;
+  double upper;
+  double value;
+  double w;
+  double x;
+//
+//  X01/X01D + X02 presumably represent the positive zero of psi
+//  (near 1.46) split as 187/128 plus a small correction for accuracy —
+//  per Cody's algorithm; confirm against the reference.
+//
+  double x01 = 187.0;
+  double x01d = 128.0;
+  double x02 = 6.9464496836234126266E-04;
+  double xinf = 1.70E+38;
+  double xlarge = 2.04E+15;
+  double xmax1 = 3.60E+16;
+  double xmin1 = 5.89E-39;
+  double xsmall = 2.05E-09;
+  double z;
+
+  x = xx;
+  w = webbur::r8_abs ( x );
+  aug = 0.0;
+//
+//  Check for valid arguments, then branch to appropriate algorithm.
+//
+  if ( xmax1 <= - x || w < xmin1 )
+  {
+    if ( 0.0 < x )
+    {
+      value = - xinf;
+    }
+    else
+    {
+      value = xinf;
+    }
+    return value;
+  }
+
+  if ( x < 0.5 )
+  {
+//
+//  X < 0.5, use reflection formula: psi(1-x) = psi(x) + pi * cot(pi*x)
+//  Use 1/X for PI*COTAN(PI*X)  when  XMIN1 < |X| <= XSMALL.
+//
+    if ( w <= xsmall )
+    {
+      aug = - one / x;
+    }
+//
+//  Argument reduction for cotangent.
+//
+    else
+    {
+      if ( x < 0.0 )
+      {
+        sgn = piov4;
+      }
+      else
+      {
+        sgn = - piov4;
+      }
+
+      w = w - ( double ) ( ( int ) ( w ) );
+      nq = ( int ) ( w * 4.0 );
+      w = 4.0 * ( w - ( double ) ( nq ) * 0.25 );
+//
+//  W is now related to the fractional part of 4.0 * X.
+//  Adjust argument to correspond to values in the first
+//  quadrant and determine the sign.
+//
+      n = nq / 2;
+
+      if ( n + n != nq )
+      {
+        w = one - w;
+      }
+
+      z = piov4 * w;
+
+      if ( ( n % 2 ) != 0 )
+      {
+        sgn = - sgn;
+      }
+//
+//  Determine the final value for  -pi * cotan(pi*x).
+//
+      n = ( nq + 1 ) / 2;
+      if ( ( n % 2 ) == 0 )
+      {
+//
+//  Check for singularity.
+//
+        if ( z == 0.0 )
+        {
+          if ( 0.0 < x )
+          {
+            value = -xinf;
+          }
+          else
+          {
+            value = xinf;
+          }
+          return value;
+        }
+        aug = sgn * ( 4.0 / std::tan ( z ) );
+      }
+      else
+      {
+        aug = sgn * ( 4.0 * std::tan ( z ) );
+      }
+    }
+    x = one - x;
+  }
+//
+//  0.5 <= X <= 3.0.
+//
+  if ( x <= three )
+  {
+    den = x;
+    upper = p1[0] * x;
+    for ( i = 1; i <= 7; i++ )
+    {
+      den = ( den + q1[i-1] ) * x;
+      upper = ( upper + p1[i]) * x;
+    }
+    den = ( upper + p1[8] ) / ( den + q1[7] );
+    x = ( x - x01 / x01d ) - x02;
+    value = den * x + aug;
+    return value;
+  }
+//
+//  3.0 < X.
+//
+  if ( x < xlarge )
+  {
+    w = one / ( x * x );
+    den = w;
+    upper = p2[0] * w;
+    for ( i = 1; i <= 5; i++ )
+    {
+      den = ( den + q2[i-1] ) * w;
+      upper = ( upper + p2[i] ) * w;
+    }
+    aug = ( upper + p2[6] ) / ( den + q2[5] ) - 0.5 / x + aug;
+  }
+
+//
+//  For XLARGE <= X the correction terms vanish and psi(x) ~ log(x).
+//
+  value = aug + std::log ( x );
+
+  return value;
+}
+//****************************************************************************80
+
+double r8_sign ( double x )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8_SIGN returns the sign of an R8.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 October 2004
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, double X, the number whose sign is desired.
+//
+//    Output, double R8_SIGN, the sign of X.
+//
+{
+  double value;
+
+//
+//  Note that X == 0.0 yields +1.0 (two-valued sign, not signum).
+//
+  if ( x < 0.0 )
+  {
+    value = -1.0;
+  }
+  else
+  {
+    value = 1.0;
+  }
+  return value;
+}
+//****************************************************************************80
+
+int r8col_compare ( int m, int n, double a[], int i, int j )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_COMPARE compares two columns in an R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8's, regarded as an array of N columns,
+//    each of length M.
+//
+//    The comparison is lexicographic, scanning each column top to bottom;
+//    the first differing entry decides the order.
+//
+//  Example:
+//
+//    Input:
+//
+//      M = 3, N = 4, I = 2, J = 4
+//
+//      A = (
+//        1.  2.  3.  4.
+//        5.  6.  7.  8.
+//        9. 10. 11. 12. )
+//
+//    Output:
+//
+//      R8COL_COMPARE = -1
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    13 September 2005
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, N, the number of rows and columns.
+//
+//    Input, double A[M*N], the M by N array.
+//
+//    Input, int I, J, the columns to be compared.
+//    I and J must be between 1 and N.
+//
+//    Output, int R8COL_COMPARE, the results of the comparison:
+//    -1, column I < column J,
+//     0, column I = column J,
+//    +1, column J < column I.
+//
+{
+  int k;
+  int value;
+//
+//  Check.
+//
+  if ( i < 1 || n < i )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8COL_COMPARE - Fatal error!\n";
+    std::cerr << "  Column index I is out of bounds.\n";
+    std::cerr << "  I = " << i << "\n";
+    std::exit ( 1 );
+  }
+
+  if ( j < 1 || n < j )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8COL_COMPARE - Fatal error!\n";
+    std::cerr << "  Column index J is out of bounds.\n";
+    std::cerr << "  J = " << j << "\n";
+    std::exit ( 1 );
+  }
+
+  value = 0;
+
+  if ( i == j )
+  {
+    return value;
+  }
+
+//
+//  Walk down both (1-based) columns; column-major storage, so entry K of
+//  column I is A[K+(I-1)*M].
+//
+  k = 0;
+
+  while ( k < m )
+  {
+    if ( a[k+(i-1)*m] < a[k+(j-1)*m] )
+    {
+      value = -1;
+      return value;
+    }
+    else if ( a[k+(j-1)*m] < a[k+(i-1)*m] )
+    {
+      value = +1;
+      return value;
+    }
+    k = k + 1;
+  }
+
+  return value;
+}
+//****************************************************************************80
+
+void r8col_sort_heap_a ( int m, int n, double a[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_SORT_HEAP_A ascending heapsorts an R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8's, regarded as an array of N columns,
+//    each of length M.
+//
+//    In lexicographic order, the statement "X < Y", applied to two real
+//    vectors X and Y of length M, means that there is some index I, with
+//    1 <= I <= M, with the property that
+//
+//      X(J) = Y(J) for J < I,
+//    and
+//      X(I) < Y(I).
+//
+//    In other words, the first time they differ, X is smaller.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    15 September 2005
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, N, the number of rows and columns.
+//
+//    Input/output, double A[M*N].
+//    On input, the array of N columns of M-vectors.
+//    On output, the columns of A have been sorted in lexicographic order.
+//
+{
+  int i;
+  int indx;
+  int isgn;
+  int j;
+
+//
+//  Degenerate sizes need no work.
+//
+  if ( m <= 0 )
+  {
+    return;
+  }
+
+  if ( n <= 1 )
+  {
+    return;
+  }
+//
+//  Initialize.
+//
+  i = 0;
+  indx = 0;
+  isgn = 0;
+  j = 0;
+//
+//  Call the external heap sorter.
+//
+//  SORT_HEAP_EXTERNAL drives the sort via a handshake: on return,
+//  0 < INDX requests a swap of items I and J, INDX < 0 requests a
+//  comparison (result passed back in ISGN), and INDX == 0 means done.
+//  The I, J it produces are used directly as the 1-based indices
+//  expected by R8COL_SWAP and R8COL_COMPARE.
+//
+  for ( ; ; )
+  {
+    webbur::sort_heap_external ( n, &indx, &i, &j, isgn );
+//
+//  Interchange the I and J objects.
+//
+    if ( 0 < indx )
+    {
+      webbur::r8col_swap ( m, n, a, i, j );
+    }
+//
+//  Compare the I and J objects.
+//
+    else if ( indx < 0 )
+    {
+      isgn = webbur::r8col_compare ( m, n, a, i, j );
+    }
+    else if ( indx == 0 )
+    {
+      break;
+    }
+  }
+
+  return;
+}
+//****************************************************************************80
+
+int *r8col_sort_heap_index_a ( int m, int n, double a[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_SORT_HEAP_INDEX_A does an indexed heap ascending sort of an R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8's, regarded as an array of N columns,
+//    each of length M.
+//
+//    The sorting is not actually carried out.  Rather an index array is
+//    created which defines the sorting.  This array may be used to sort
+//    or index the array, or to sort or index related arrays keyed on the
+//    original array.
+//
+//    A(*,J1) < A(*,J2) if the first nonzero entry of A(*,J1)-A(*,J2)
+//    is negative.
+//
+//    Once the index array is computed, the sorting can be carried out
+//    "implicitly:
+//
+//      A(*,INDX(*)) is sorted,
+//
+//    Note that the index vector is 0-based.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    01 November 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the number of rows in each column of A.
+//
+//    Input, int N, the number of columns in A.
+//
+//    Input, double A[M*N], the array.
+//
+//    Output, int R8COL_SORT_HEAP_INDEX_A[N], contains the sort index.  The
+//    I-th column of the sorted array is A(*,INDX(I)).
+//    The caller is responsible for deleting the returned array.
+//    Returns NULL when N < 1.
+//
+{
+  double *column;
+  int i;
+  int *indx;
+  int indxt;
+  int ir;
+  int isgn;
+  int j;
+  int k;
+  int l;
+
+  if ( n < 1 )
+  {
+    return NULL;
+  }
+
+//
+//  Start from the identity permutation.
+//
+  indx = new int[n];
+
+  for ( i = 0; i < n; i++ )
+  {
+    indx[i] = i;
+  }
+
+  if ( n == 1 )
+  {
+    return indx;
+  }
+
+//
+//  COLUMN is scratch space for the column whose final position is being
+//  sought during each sift-down.
+//
+  column = new double[m];
+
+//
+//  Classic heapsort over INDX: L counts down through the build phase,
+//  IR counts down through the extraction phase.  (L, IR, I, J are
+//  1-based positions into INDX.)
+//
+  l = n / 2 + 1;
+  ir = n;
+
+  for ( ; ; )
+  {
+    if ( 1 < l )
+    {
+      l = l - 1;
+      indxt = indx[l-1];
+      for ( k = 0; k < m; k++ )
+      {
+        column[k] = a[k+indxt*m];
+      }
+    }
+    else
+    {
+      indxt = indx[ir-1];
+      for ( k = 0; k < m; k++ )
+      {
+        column[k] = a[k+indxt*m];
+      }
+      indx[ir-1] = indx[0];
+      ir = ir - 1;
+
+      if ( ir == 1 )
+      {
+        indx[0] = indxt;
+        break;
+      }
+    }
+
+//
+//  Sift COLUMN down from position L to its proper place in the heap.
+//
+    i = l;
+    j = l + l;
+
+    while ( j <= ir )
+    {
+      if ( j < ir )
+      {
+        isgn = webbur::r8vec_compare ( m, a+indx[j-1]*m, a+indx[j]*m );
+
+        if ( isgn < 0 )
+        {
+          j = j + 1;
+        }
+      }
+
+      isgn = webbur::r8vec_compare ( m, column, a+indx[j-1]*m );
+
+      if ( isgn < 0 )
+      {
+        indx[i-1] = indx[j-1];
+        i = j;
+        j = j + j;
+      }
+      else
+      {
+        j = ir + 1;
+      }
+    }
+    indx[i-1] = indxt;
+  }
+  delete [] column;
+
+  return indx;
+}
+//****************************************************************************80
+
+int r8col_sorted_unique_count ( int m, int n, double a[], double tol )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_SORTED_UNIQUE_COUNT counts unique elements in a sorted R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8's, regarded as an array of N columns,
+//    each of length M.
+//
+//    The columns of the array may be ascending or descending sorted.
+//
+//    If the tolerance is large enough, then the concept of uniqueness
+//    can become ambiguous.  If we have a tolerance of 1.5, then in the
+//    list ( 1, 2, 3, 4, 5, 6, 7, 8, 9 ) is it fair to say we have only
+//    one unique entry?  That would be because 1 may be regarded as unique,
+//    and then 2 is too close to 1 to be unique, and 3 is too close to 2 to
+//    be unique and so on.
+//
+//    This seems wrongheaded.  So I prefer the idea that an item is not
+//    unique under a tolerance only if it is close to something that IS unique.
+//    Thus, the unique items are guaranteed to cover the space if we include
+//    a disk of radius TOL around each one.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    01 November 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, N, the number of rows and columns.
+//
+//    Input, double A[M*N], a sorted array, containing
+//    N columns of data.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Output, int R8COL_SORTED_UNIQUE_COUNT, the number of unique columns.
+//
+{
+  double diff;
+  int i;
+  int j1;
+  int j2;
+  int unique_num;
+
+  unique_num = 0;
+
+  if ( n <= 0 )
+  {
+    return unique_num;
+  }
+
+//
+//  J1 tracks the most recent unique column; each candidate J2 is compared
+//  against it using the max-norm of the column difference.
+//
+  unique_num = 1;
+  j1 = 0;
+
+  for ( j2 = 1; j2 < n; j2++ )
+  {
+    diff = 0.0;
+    for ( i = 0; i < m; i++ )
+    {
+      diff = webbur::r8_max ( diff, webbur::r8_abs ( a[i+j1*m] - a[i+j2*m] ) );
+    }
+    if ( tol < diff )
+    {
+      unique_num = unique_num + 1;
+      j1 = j2;
+    }
+  }
+
+  return unique_num;
+}
+//****************************************************************************80
+
+void r8col_swap ( int m, int n, double a[], int j1, int j2 )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_SWAP swaps columns J1 and J2 of an R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8's, regarded as an array of N columns,
+//    each of length M.
+//
+//  Example:
+//
+//    Input:
+//
+//      M = 3, N = 4, J1 = 2, J2 = 4
+//
+//      A = (
+//        1.  2.  3.  4.
+//        5.  6.  7.  8.
+//        9. 10. 11. 12. )
+//
+//    Output:
+//
+//      A = (
+//        1.  4.  3.  2.
+//        5.  8.  7.  6.
+//        9. 12. 11. 10. )
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    23 October 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, N, the number of rows and columns.
+//
+//    Input/output, double A[M*N], the M by N array.
+//
+//    Input, int J1, J2, the columns to be swapped.
+//    These columns are 1-based.
+//
+{
+  int i;
+  double temp;
+
+//
+//  Validate the 1-based column indices before touching the data.
+//
+  if ( j1 < 1 || n < j1 || j2 < 1 || n < j2 )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8COL_SWAP - Fatal error!\n";
+    std::cerr << "  J1 or J2 is out of bounds.\n";
+    std::cerr << "  J1 =   " << j1 << "\n";
+    std::cerr << "  J2 =   " << j2 << "\n";
+    std::cerr << "  NCOL = " << n << "\n";
+    std::exit ( 1 );
+  }
+
+  if ( j1 == j2 )
+  {
+    return;
+  }
+
+  for ( i = 0; i < m; i++ )
+  {
+    temp          = a[i+(j1-1)*m];
+    a[i+(j1-1)*m] = a[i+(j2-1)*m];
+    a[i+(j2-1)*m] = temp;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void r8col_tol_undex ( int m, int n, double a[], int unique_num, double tol,
+  int undx[], int xdnu[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_TOL_UNDEX indexes tolerably unique entries of an R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8's, regarded as an array of N columns,
+//    each of length M.
+//
+//    The goal of this routine is to determine a vector UNDX,
+//    which points to the unique elements of A, in sorted order,
+//    and a vector XDNU, which identifies, for each entry of A, the index of
+//    the unique sorted element of A.
+//
+//    This is all done with index vectors, so that the elements of
+//    A are never moved.
+//
+//    The first step of the algorithm requires the indexed sorting
+//    of A, which creates arrays INDX and XDNI.  (If all the entries
+//    of A are unique, then these arrays are the same as UNDX and XDNU.)
+//
+//    We then use INDX to examine the entries of A in sorted order,
+//    noting the unique entries, creating the entries of XDNU and
+//    UNDX as we go.
+//
+//    Once this process has been completed, the vector A could be
+//    replaced by a compressed vector XU, containing the unique entries
+//    of A in sorted order, using the formula
+//
+//      XU(*) = A(UNDX(*)).
+//
+//    We could then, if we wished, reconstruct the entire vector A, or
+//    any element of it, by index, as follows:
+//
+//      A(I) = XU(XDNU(I)).
+//
+//    We could then replace A by the combination of XU and XDNU.
+//
+//    Later, when we need the I-th entry of A, we can locate it as
+//    the XDNU(I)-th entry of XU.
+//
+//    Here is an example of a vector A, the sort and inverse sort
+//    index vectors, and the unique sort and inverse unique sort vectors
+//    and the compressed unique sorted vector.
+//
+//      I     A  Indx  Xdni       XU  Undx  Xdnu
+//    ----+-----+-----+-----+--------+-----+-----+
+//      0 | 11.     0     0 |    11.     0     0
+//      1 | 22.     2     4 |    22.     1     1
+//      2 | 11.     5     1 |    33.     3     0
+//      3 | 33.     8     7 |    55.     4     2
+//      4 | 55.     1     8 |                  3
+//      5 | 11.     6     2 |                  0
+//      6 | 22.     7     5 |                  1
+//      7 | 22.     3     6 |                  1
+//      8 | 11.     4     3 |                  0
+//
+//    INDX(2) = 3 means that sorted item(2) is A(3).
+//    XDNI(2) = 5 means that A(2) is sorted item(5).
+//
+//    UNDX(3) = 4 means that unique sorted item(3) is at A(4).
+//    XDNU(8) = 2 means that A(8) is at unique sorted item(2).
+//
+//    XU(XDNU(I))) = X(I).
+//    XU(I)        = X(UNDX(I)).
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 July 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, the dimension of the data values.
+//
+//    Input, int N, the number of data values,
+//
+//    Input, double A[M*N], the data values.
+//
+//    Input, int UNIQUE_NUM, the number of unique values in A.
+//    This value is only required for languages in which the size of
+//    UNDX must be known in advance.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Output, int UNDX[UNIQUE_NUM], the UNDX vector.
+//
+//    Output, int XDNU[N], the XDNU vector.
+//
+{
+  double diff;
+  int i;
+  int i2;
+  int *indx;
+  int j;
+  int k;
+  bool unique;
+//
+//  Implicitly sort the array.
+//
+  indx = webbur::r8col_sort_heap_index_a ( m, n, a );
+//
+//  Consider entry I = 0.
+//  It is unique, so set the number of unique items to K.
+//  Set the K-th unique item to I.
+//  Set the representative of item I to the K-th unique item.
+//
+  i = 0;
+  k = 0;
+  undx[k] = indx[i];
+  xdnu[indx[i]] = k;
+//
+//  Consider entry I.
+//
+//  If it is unique, increase the unique count K, set the
+//  K-th unique item to I, and set the representative of I to K.
+//
+//  If it is not unique, set the representative of item I to a
+//  previously determined unique item that is close to it.
+//
+//  Each candidate is compared (max-norm, tolerance TOL) against EVERY
+//  unique representative found so far, not just the previous column.
+//
+  for ( i = 1; i < n; i++ )
+  {
+    unique = true;
+    for ( j = 0; j <= k; j++ )
+    {
+      diff = 0.0;
+      for ( i2 = 0; i2 < m; i2++ )
+      {
+        diff = webbur::r8_max ( diff,
+          webbur::r8_abs ( a[i2+indx[i]*m] - a[i2+undx[j]*m] ) );
+      }
+      if ( diff <= tol )
+      {
+        unique = false;
+        xdnu[indx[i]] = j;
+        break;
+      }
+    }
+    if ( unique )
+    {
+      k = k + 1;
+      undx[k] = indx[i];
+      xdnu[indx[i]] = k;
+    }
+  }
+//
+//  Free the scratch index vector allocated by the sort.
+//
+  delete [] indx;
+
+  return;
+}
+//****************************************************************************80
+
+int r8col_tol_unique_count ( int m, int n, double a[], double tol )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_TOL_UNIQUE_COUNT counts tolerably unique entries in an R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8's, regarded as an array of N columns,
+//    each of length M.
+//
+//    The columns of the array may be ascending or descending sorted.
+//
+//    If the tolerance is large enough, then the concept of uniqueness
+//    can become ambiguous.  If we have a tolerance of 1.5, then in the
+//    list ( 1, 2, 3, 4, 5, 6, 7, 8, 9 ) is it fair to say we have only
+//    one unique entry?  That would be because 1 may be regarded as unique,
+//    and then 2 is too close to 1 to be unique, and 3 is too close to 2 to
+//    be unique and so on.
+//
+//    This seems wrongheaded.  So I prefer the idea that an item is not
+//    unique under a tolerance only if it is close to something that IS unique.
+//    Thus, the unique items are guaranteed to cover the space if we include
+//    a disk of radius TOL around each one.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 July 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, N, the number of rows and columns.
+//
+//    Input, double A[M*N], the array of N columns of data.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Output, int R8COL_TOL_UNIQUE_COUNT, the number of unique columns.
+//
+{
+  double diff;
+  int i;
+  int i2;
+  int *indx;
+  int j;
+  int k;
+  bool unique;
+  int *undx;
+
+  undx = new int[n];
+//
+//  Implicitly sort the array.
+//
+  indx = webbur::r8col_sort_heap_index_a ( m, n, a );
+//
+//  Consider entry I = 0.
+//  It is unique, so set the number of unique items to K.
+//  Set the K-th unique item to I.
+//  Set the representative of item I to the K-th unique item.
+//
+  i = 0;
+  k = 0;
+  undx[k] = indx[i];
+//
+//  Consider entry I.
+//
+//  If it is unique, increase the unique count K, set the
+//  K-th unique item to I, and set the representative of I to K.
+//
+//  If it is not unique, set the representative of item I to a
+//  previously determined unique item that is close to it.
+//
+  for ( i = 1; i < n; i++ )
+  {
+    unique = true;
+    for ( j = 0; j <= k; j++ )
+    {
+      diff = 0.0;
+      for ( i2 = 0; i2 < m; i2++ )
+      {
+        diff = webbur::r8_max ( diff,
+          webbur::r8_abs ( a[i2+indx[i]*m] - a[i2+undx[j]*m] ) );
+      }
+      if ( diff <= tol )
+      {
+        unique = false;
+        break;
+      }
+    }
+    if ( unique )
+    {
+      k = k + 1;
+      undx[k] = indx[i];
+    }
+  }
+  delete [] indx;
+  delete [] undx;
+
+  k = k + 1;
+
+  return k;
+}
+//****************************************************************************80
+
+void r8col_undex ( int x_dim, int x_num, double x_val[], int x_unique_num,
+  double tol, int undx[], int xdnu[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_UNDEX returns unique sorted indexes for an R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8's, regarded as an array of N columns,
+//    each of length M.
+//
+//    The goal of this routine is to determine a vector UNDX,
+//    which points to the unique elements of X, in sorted order,
+//    and a vector XDNU, which identifies, for each entry of X, the index of
+//    the unique sorted element of X.
+//
+//    This is all done with index vectors, so that the elements of
+//    X are never moved.
+//
+//    The first step of the algorithm requires the indexed sorting
+//    of X, which creates arrays INDX and XDNI.  (If all the entries
+//    of X are unique, then these arrays are the same as UNDX and XDNU.)
+//
+//    We then use INDX to examine the entries of X in sorted order,
+//    noting the unique entries, creating the entries of XDNU and
+//    UNDX as we go.
+//
+//    Once this process has been completed, the vector X could be
+//    replaced by a compressed vector XU, containing the unique entries
+//    of X in sorted order, using the formula
+//
+//      XU(*) = X(UNDX(*)).
+//
+//    We could then, if we wished, reconstruct the entire vector X, or
+//    any element of it, by index, as follows:
+//
+//      X(I) = XU(XDNU(I)).
+//
+//    We could then replace X by the combination of XU and XDNU.
+//
+//    Later, when we need the I-th entry of X, we can locate it as
+//    the XDNU(I)-th entry of XU.
+//
+//    Here is an example of a vector X, the sort and inverse sort
+//    index vectors, and the unique sort and inverse unique sort vectors
+//    and the compressed unique sorted vector.
+//
+//      I     X  Indx  Xdni       XU  Undx  Xdnu
+//    ----+-----+-----+-----+--------+-----+-----+
+//      0 | 11.     0     0 |    11.     0     0
+//      1 | 22.     2     4 |    22.     1     1
+//      2 | 11.     5     1 |    33.     3     0
+//      3 | 33.     8     7 |    55.     4     2
+//      4 | 55.     1     8 |                  3
+//      5 | 11.     6     2 |                  0
+//      6 | 22.     7     5 |                  1
+//      7 | 22.     3     6 |                  1
+//      8 | 11.     4     3 |                  0
+//
+//    INDX(2) = 3 means that sorted item(2) is X(3).
+//    XDNI(2) = 5 means that X(2) is sorted item(5).
+//
+//    UNDX(3) = 4 means that unique sorted item(3) is at X(4).
+//    XDNU(8) = 2 means that X(8) is at unique sorted item(2).
+//
+//    XU(XDNU(I))) = X(I).
+//    XU(I)        = X(UNDX(I)).
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    02 November 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int X_DIM, the dimension of the data values.
+//    (the number of rows in the R8COL).
+//
+//    Input, int X_NUM, the number of data values,
+//    (the number of columns in the R8COL).
+//
+//    Input, double X_VAL[X_DIM*X_NUM], the data values.
+//
+//    Input, int X_UNIQUE_NUM, the number of unique values in X_VAL.
+//    This value is only required for languages in which the size of
+//    UNDX must be known in advance.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Output, int UNDX[X_UNIQUE_NUM], the UNDX vector.
+//
+//    Output, int XDNU[X_NUM], the XDNU vector.
+//
+{
+  double diff;
+  int i;
+  int *indx;
+  int j;
+  int k;
+//
+//  Implicitly sort the array.
+//
+  indx = webbur::r8col_sort_heap_index_a ( x_dim, x_num, x_val );
+//
+//  Walk through the implicitly sorted array X.
+//
+  i = 0;
+
+  j = 0;
+  undx[j] = indx[i];
+
+  xdnu[indx[i]] = j;
+
+  for ( i = 1; i < x_num; i++ )
+  {
+    diff = 0.0;
+    for ( k = 0; k < x_dim; k++ )
+    {
+      diff = r8_max ( diff,
+        webbur::r8_abs ( x_val[k+indx[i]*x_dim] - x_val[k+undx[j]*x_dim] ) );
+    }
+    if ( tol < diff )
+    {
+      j = j + 1;
+      undx[j] = indx[i];
+    }
+    xdnu[indx[i]] = j;
+  }
+  delete [] indx;
+
+  return;
+}
+//****************************************************************************80
+
+void r8col_unique_index ( int m, int n, double a[], double tol,
+  int unique_index[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8COL_UNIQUE_INDEX indexes the first occurrence of values in an R8COL.
+//
+//  Discussion:
+//
+//    An R8COL is an M by N array of R8 values.
+//    It is regarded as an array of N columns of length M.
+//
+//    For element A(1:M,J) of the matrix, UNIQUE_INDEX(J) is the uniqueness
+//   index of A(1:M,J).  That is, if A_UNIQUE contains the unique elements
+//    of A, gathered in order, then
+//
+//      A_UNIQUE ( 1:M, UNIQUE_INDEX(J) ) = A(1:M,J)
+//
+//    The user must preallocate space for the output array UNIQUE_INDEX.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    24 November 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, N, the number of rows and columns of A.
+//    The length of an "element" of A, and the number of "elements".
+//
+//    Input, double A[M*N], the array.
+//
+//    Input, double TOL, a tolerance for equality.
+//
+//    Output, int UNIQUE_INDEX[N], the unique index.
+//
+{
+  double diff;
+  int i;
+  int j1;
+  int j2;
+  int unique_num;
+
+  for ( j1 = 0; j1 < n; j1++ )
+  {
+    unique_index[j1] = -1;
+  }
+  unique_num = 0;
+
+  for ( j1 = 0; j1 < n; j1++ )
+  {
+    if ( unique_index[j1] == -1 )
+    {
+      unique_index[j1] = unique_num;
+
+      for ( j2 = j1 + 1; j2 < n; j2++ )
+      {
+        diff = 0.0;
+        for ( i = 0; i < m; i++ )
+        {
+          diff = webbur::r8_max ( diff,
+            webbur::r8_abs ( a[i+j1*m] - a[i+j2*m] ) );
+        }
+        if ( diff <= tol )
+        {
+          unique_index[j2] = unique_num;
+        }
+      }
+      unique_num = unique_num + 1;
+    }
+  }
+  return;
+}
+//****************************************************************************80
+
+void r8mat_transpose_print ( int m, int n, double a[], std::string title )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8MAT_TRANSPOSE_PRINT prints an R8MAT, transposed.
+//
+//  Discussion:
+//
+//    An R8MAT is a doubly dimensioned array of R8 values, stored as a vector
+//    in column-major order.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    10 September 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, N, the number of rows and columns.
+//
+//    Input, double A[M*N], an M by N matrix to be printed.
+//
+//    Input, string TITLE, a title.
+//
+{
+  r8mat_transpose_print_some ( m, n, a, 1, 1, m, n, title );
+
+  return;
+}
+//****************************************************************************80
+
+void r8mat_transpose_print_some ( int m, int n, double a[], int ilo, int jlo,
+  int ihi, int jhi, std::string title )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8MAT_TRANSPOSE_PRINT_SOME prints some of an R8MAT, transposed.
+//
+//  Discussion:
+//
+//    An R8MAT is a doubly dimensioned array of R8 values, stored as a vector
+//    in column-major order.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    10 September 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int M, N, the number of rows and columns.
+//
+//    Input, double A[M*N], an M by N matrix to be printed.
+//
+//    Input, int ILO, JLO, the first row and column to print.
+//
+//    Input, int IHI, JHI, the last row and column to print.
+//
+//    Input, string TITLE, a title.
+//
+{
+# define INCX 5
+
+  int i;
+  int i2;
+  int i2hi;
+  int i2lo;
+  int inc;
+  int j;
+  int j2hi;
+  int j2lo;
+
+  std::cout << "\n";
+  std::cout << title << "\n";
+
+  for ( i2lo = i4_max ( ilo, 1 ); i2lo <= i4_min ( ihi, m ); i2lo = i2lo + INCX )
+  {
+    i2hi = i2lo + INCX - 1;
+    i2hi = i4_min ( i2hi, m );
+    i2hi = i4_min ( i2hi, ihi );
+
+    inc = i2hi + 1 - i2lo;
+
+    std::cout << "\n";
+    std::cout << "  Row: ";
+    for ( i = i2lo; i <= i2hi; i++ )
+    {
+      std::cout << std::setw(7) << i - 1 << "       ";
+    }
+    std::cout << "\n";
+    std::cout << "  Col\n";
+    std::cout << "\n";
+
+    j2lo = i4_max ( jlo, 1 );
+    j2hi = i4_min ( jhi, n );
+
+    for ( j = j2lo; j <= j2hi; j++ )
+    {
+      std::cout << std::setw(5) << j - 1 << ":";
+      for ( i2 = 1; i2 <= inc; i2++ )
+      {
+        i = i2lo - 1 + i2;
+        std::cout << std::setw(14) << a[(i-1)+(j-1)*m];
+      }
+      std::cout << "\n";
+    }
+  }
+
+  return;
+# undef INCX
+}
+//****************************************************************************80
+
void r8mat_write ( std::string output_filename, int m, int n, double table[] )

//****************************************************************************80
//
//  Purpose:
//
//    R8MAT_WRITE writes an R8MAT file.
//
//  Discussion:
//
//    Each column of TABLE becomes one line of the output file, with each
//    value written in a 24-character field at 16 digits of precision.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, string OUTPUT_FILENAME, the output filename.
//
//    Input, int M, the spatial dimension.
//
//    Input, int N, the number of points.
//
//    Input, double TABLE[M*N], the table data.
//
{
  int col;
  int row;
  std::ofstream stream;
//
//  Open the file; on failure, report and bail out without writing.
//
  stream.open ( output_filename.c_str ( ) );

  if ( !stream )
  {
    std::cerr << "\n";
    std::cerr << "R8MAT_WRITE - Fatal error!\n";
    std::cerr << "  Could not open the output file.\n";
    return;
  }
//
//  Write one line per column.
//
  for ( col = 0; col < n; col++ )
  {
    for ( row = 0; row < m; row++ )
    {
      stream << "  " << std::setw(24) << std::setprecision(16)
        << table[row+col*m];
    }
    stream << "\n";
  }

  stream.close ( );

  return;
}
+//****************************************************************************80
+
double r8poly_ant_val ( int n, double poly_cof[], double xval )

//****************************************************************************80
//
//  Purpose:
//
//    R8POLY_ANT_VAL evaluates the antiderivative of an R8POLY in standard form.
//
//  Discussion:
//
//    The antiderivative of sum POLY_COF[I] * X^I is evaluated by a
//    Horner-style recurrence; the constant of integration is taken to
//    be zero.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the order of the polynomial.
//
//    Input, double POLY_COF[N], the polynomial coefficients.  POLY_COF[0]
//    is the constant term, and POLY_COF[N-1] is the coefficient of X**(N-1).
//
//    Input, double XVAL, the point where the antiderivative is to be
//    evaluated.
//
//    Output, double R8POLY_ANT_VAL, the value of the antiderivative of
//    the polynomial at XVAL.
//
{
  double acc;
  int term;

  acc = 0.0;
//
//  Horner scheme on the integrated coefficients POLY_COF[I]/(I+1).
//
  term = n - 1;
  while ( 0 <= term )
  {
    acc = ( acc + poly_cof[term] / ( double ) ( term + 1 ) ) * xval;
    term = term - 1;
  }

  return acc;
}
+//****************************************************************************80
+
double *r8vec_chebyshev_new ( int n, double a_first, double a_last )

//****************************************************************************80
//
//  Purpose:
//
//    R8VEC_CHEBYSHEV_NEW creates a vector of Chebyshev spaced values.
//
//  Discussion:
//
//    An R8VEC is a vector of R8's.  The entries are the images of the
//    Chebyshev extreme points of [-1,+1], mapped onto [A_FIRST,A_LAST].
//    For odd N, the central value is forced to the exact midpoint.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vector.
//
//    Input, double A_FIRST, A_LAST, the first and last entries.
//
//    Output, double R8VEC_CHEBYSHEV_NEW[N], a vector of Chebyshev
//    spaced data.  The caller must delete [] the result.
//
{
  double c;
  int i;
  double pi = 3.141592653589793;
  double theta;
  double *v;

  v = new double[n];

  if ( n == 1 )
  {
    v[0] = ( a_first + a_last ) / 2.0;
    return v;
  }

  for ( i = 0; i < n; i++ )
  {
    theta = ( double ) ( n - 1 - i ) * pi / ( double ) ( n - 1 );
    c = std::cos ( theta );
//
//  For odd N, pin the middle node so it is exactly zero (cos(pi/2)
//  is only approximately zero in floating point).
//
    if ( ( n % 2 ) == 1 && 2 * i + 1 == n )
    {
      c = 0.0;
    }

    v[i] = ( ( 1.0 - c ) * a_first
           + ( 1.0 + c ) * a_last )
           /   2.0;
  }

  return v;
}
+//****************************************************************************80
+
int r8vec_compare ( int n, double a[], double b[] )

//****************************************************************************80
//
//  Purpose:
//
//    R8VEC_COMPARE compares two R8VEC's.
//
//  Discussion:
//
//    An R8VEC is a vector of R8's.  The comparison is lexicographic:
//    the first position at which the vectors differ decides the result.
//
//  Example:
//
//    A = ( 2.0, 6.0, 2.0 ), B = ( 2.0, 8.0, 12.0 )  =>  result -1.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vectors.
//
//    Input, double A[N], B[N], the vectors to be compared.
//
//    Output, int R8VEC_COMPARE, the results of the comparison:
//    -1, A is lexicographically less than B,
//     0, A is equal to B,
//    +1, A is lexicographically greater than B.
//
{
  int pos;

  for ( pos = 0; pos < n; pos++ )
  {
    if ( a[pos] < b[pos] )
    {
      return -1;
    }
    if ( b[pos] < a[pos] )
    {
      return +1;
    }
  }
//
//  No differing position was found: the vectors are equal.
//
  return 0;
}
+//****************************************************************************80
+
void r8vec_copy ( int n, double a1[], double a2[] )

//****************************************************************************80
//
//  Purpose:
//
//    R8VEC_COPY copies an R8VEC.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vectors.
//
//    Input, double A1[N], the vector to be copied.
//
//    Output, double A2[N], the copy of A1.
//
{
  int k;
//
//  Element-wise copy; no overlap handling is attempted.
//
  k = 0;
  while ( k < n )
  {
    a2[k] = a1[k];
    k = k + 1;
  }
  return;
}
+//****************************************************************************80
+
double *r8vec_copy_new ( int n, double a1[] )

//****************************************************************************80
//
//  Purpose:
//
//    R8VEC_COPY_NEW copies an R8VEC to a "new" R8VEC.
//
//  Discussion:
//
//    An R8VEC is a vector of R8's.  The caller must delete [] the result.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vectors.
//
//    Input, double A1[N], the vector to be copied.
//
//    Output, double R8VEC_COPY_NEW[N], the copy of A1.
//
{
  double *dup;
  int k;

  dup = new double[n];

  for ( k = 0; k < n; k++ )
  {
    dup[k] = a1[k];
  }

  return dup;
}
+//****************************************************************************80
+
+double r8vec_diff_norm_li ( int n, double a[], double b[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_DIFF_NORM_LI returns the L-oo norm of the difference of R8VEC's.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    The vector L-oo norm is defined as:
+//
+//      R8VEC_NORM_LI = max ( 1 <= I <= N ) abs ( A(I) ).
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    02 April 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in A.
+//
+//    Input, double A[N], B[N], the vectors.
+//
+//    Output, double R8VEC_DIFF_NORM_LI, the L-oo norm of A - B.
+//
+{
+  int i;
+  double value;
+
+  value = 0.0;
+
+  for ( i = 0; i < n; i++ )
+  {
+    value = webbur::r8_max ( value, webbur::r8_abs ( a[i] - b[i] ) );
+  }
+  return value;
+}
+//****************************************************************************80
+
void r8vec_direct_product2 ( int factor_index, int factor_order,
  double factor_value[], int factor_num, int point_num, double w[] )

//****************************************************************************80
//
//  Purpose:
//
//    R8VEC_DIRECT_PRODUCT2 creates a direct product of R8VEC's.
//
//  Discussion:
//
//    An R8VEC is a vector of R8's.
//
//    The routine is called once per 1D factor, starting with
//    FACTOR_INDEX = 0, and accumulates into W the weights of the direct
//    product rule:
//
//      W(J) = W(1,J1) * W(2,J2) * ... * W(K,JK)
//
//    so that, e.g., factors with weights (2,3,5,7), (11,13,17) and (19,23)
//    produce the 24 products (2*11*19), (3*11*19), ..., (7*17*23).
//
//    Rather than enumerating all index tuples (J1,...,JK) at once, each
//    call distributes the current factor's weights over W in blocks; the
//    factor's data can then be discarded.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int FACTOR_INDEX, the index of the factor being processed.
//    The first factor processed must be factor 0.
//
//    Input, int FACTOR_ORDER, the order of the factor.
//
//    Input, double FACTOR_VALUE[FACTOR_ORDER], the factor values for
//    factor FACTOR_INDEX.
//
//    Input, int FACTOR_NUM, the number of factors.
//
//    Input, int POINT_NUM, the number of elements in the direct product.
//
//    Input/output, double W[POINT_NUM], the elements of the
//    direct product, which are built up gradually.
//
//  Local Parameters:
//
//    Local, static integer CONTIG, the number of consecutive values to set.
//
//    Local, static integer SKIP, the distance from the current value of
//    START to the next location of a block of values to set.
//
//    Local, static integer REP, the number of blocks of values to set.
//
//    NOTE(review): the static state makes this routine non-reentrant; a
//    product build must run to completion before another one starts.
//
{
  static int contig = 0;
  static int rep = 0;
  static int skip = 0;

  int block;
  int f;
  int p;
  int start;
//
//  Factor 0 resets the accumulated state and initializes W to 1.
//
  if ( factor_index == 0 )
  {
    contig = 1;
    skip = 1;
    rep = point_num;
    for ( p = 0; p < point_num; p++ )
    {
      w[p] = 1.0;
    }
  }

  rep = rep / factor_order;
  skip = skip * factor_order;
//
//  Multiply each factor weight into REP blocks of CONTIG entries,
//  the blocks being SKIP apart.
//
  for ( f = 0; f < factor_order; f++ )
  {
    start = f * contig;

    for ( block = 0; block < rep; block++ )
    {
      for ( p = start; p < start + contig; p++ )
      {
        w[p] = w[p] * factor_value[f];
      }
      start = start + skip;
    }
  }

  contig = contig * factor_order;

  return;
}
+//****************************************************************************80
+
double r8vec_dot_product ( int n, double a1[], double a2[] )

//****************************************************************************80
//
//  Purpose:
//
//    R8VEC_DOT_PRODUCT computes the dot product of a pair of R8VEC's.
//
//  Discussion:
//
//    An R8VEC is a vector of R8's.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vectors.
//
//    Input, double A1[N], A2[N], the two vectors to be considered.
//
//    Output, double R8VEC_DOT_PRODUCT, the dot product of the vectors.
//
{
  int k;
  double sum;
//
//  Plain left-to-right accumulation of the pairwise products.
//
  sum = 0.0;
  for ( k = 0; k < n; k++ )
  {
    sum = sum + a1[k] * a2[k];
  }

  return sum;
}
+//****************************************************************************80
+
double r8vec_i4vec_dot_product ( int n, double r8vec[], int i4vec[] )

//****************************************************************************80
//
//  Purpose:
//
//    R8VEC_I4VEC_DOT_PRODUCT computes the dot product of an R8VEC and an I4VEC.
//
//  Discussion:
//
//    An R8VEC is a vector of R8's; an I4VEC is a vector of I4's.  Each
//    integer entry is promoted to double before multiplying.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of entries in the vectors.
//
//    Input, double R8VEC[N], the first vector.
//
//    Input, int I4VEC[N], the second vector.
//
//    Output, double R8VEC_I4VEC_DOT_PRODUCT, the dot product of the vectors.
//
{
  int k;
  double sum;

  sum = 0.0;
  for ( k = 0; k < n; k++ )
  {
    sum = sum + r8vec[k] * ( double ) ( i4vec[k] );
  }

  return sum;
}
+//****************************************************************************80
+
void r8vec_index_sorted_range ( int n, double r[], int indx[], double r_lo,
  double r_hi, int *i_lo, int *i_hi )

//****************************************************************************80
//
//  Purpose:
//
//    R8VEC_INDEX_SORTED_RANGE: search index sorted vector for elements in a range.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Author:
//
//    John Burkardt
//
//  Parameters:
//
//    Input, int N, the number of items in the vector.
//
//    Input, double R[N], the index sorted vector.
//
//    Input, int INDX[N], the vector used to sort R.
//    The vector R[INDX[*]] is sorted.
//
//    Input, double R_LO, R_HI, the limits of the range.
//
//    Output, int *I_LO, *I_HI, the range of indices
//    so that I_LO <= I <= I_HI => R_LO <= R[INDX[I]] <= R_HI.  If no
//    values in R lie in the range, then I_HI < I_LO will be returned.
//
{
  int lo;
  int hi;
  int mid_lo;
  int mid_hi;
//
//  Dispose of the cases where the whole sorted vector lies outside
//  the requested range.
//
  if ( r[indx[n-1]] < r_lo )
  {
    *i_lo = n;
    *i_hi = n - 1;
    return;
  }

  if ( r_hi < r[indx[0]] )
  {
    *i_lo = 0;
    *i_hi = -1;
    return;
  }
//
//  A single-element vector is handled directly; the bisection below
//  needs at least two intervals.
//
  if ( n == 1 )
  {
    if ( r_lo <= r[indx[0]] && r[indx[0]] <= r_hi )
    {
      *i_lo = 0;
      *i_hi = 0;
    }
    else
    {
      *i_lo = -1;
      *i_hi = -2;
    }
    return;
  }
//
//  Bracket R_LO by bisection on the sorted order.
//
  if ( r_lo <= r[indx[0]] )
  {
    *i_lo = 0;
  }
  else
  {
//
//  R_LO lies in one of the intervals spanned by R(INDX(LO)) .. R(INDX(HI)).
//  Examine the middle interval [R(INDX(MID_LO)), R(INDX(MID_HI))] and
//  narrow toward the side containing R_LO.
//
    lo = 0;
    hi = n - 1;
    mid_lo = ( lo + hi - 1 ) / 2;
    mid_hi = mid_lo + 1;

    for ( ; ; )
    {
      if ( r_lo < r[indx[mid_lo]] )
      {
        hi = mid_lo;
        mid_lo = ( lo + hi - 1 ) / 2;
        mid_hi = mid_lo + 1;
      }
      else if ( r[indx[mid_hi]] < r_lo )
      {
        lo = mid_hi;
        mid_lo = ( lo + hi - 1 ) / 2;
        mid_hi = mid_lo + 1;
      }
      else
      {
        *i_lo = mid_lo;
        break;
      }
    }
  }
//
//  Bracket R_HI the same way, starting from the bracket found for R_LO.
//
  if ( r[indx[n-1]] <= r_hi )
  {
    *i_hi = n - 1;
  }
  else
  {
    lo = *i_lo;
    hi = n - 1;
    mid_lo = ( lo + hi - 1 ) / 2;
    mid_hi = mid_lo + 1;

    for ( ; ; )
    {
      if ( r_hi < r[indx[mid_lo]] )
      {
        hi = mid_lo;
        mid_lo = ( lo + hi - 1 ) / 2;
        mid_hi = mid_lo + 1;
      }
      else if ( r[indx[mid_hi]] < r_hi )
      {
        lo = mid_hi;
        mid_lo = ( lo + hi - 1 ) / 2;
        mid_hi = mid_lo + 1;
      }
      else
      {
        *i_hi = mid_hi;
        break;
      }
    }
  }
//
//  We have the largest I_LO and smallest I_HI with
//    R(INDX(I_LO)) <= R_LO <= R_HI <= R(INDX(I_HI)),
//  but the contract wants
//    R_LO <= R(INDX(I_LO)) <= R(INDX(I_HI)) <= R_HI,
//  which is obtained by nudging I_LO up and I_HI down where needed.
//
  if ( r[indx[*i_lo]] < r_lo )
  {
    *i_lo = *i_lo + 1;
    if ( n - 1 < *i_lo )
    {
      *i_hi = *i_lo - 1;
    }
  }

  if ( r_hi < r[indx[*i_hi]] )
  {
    *i_hi = *i_hi - 1;
    if ( *i_hi < 0 )
    {
      *i_lo = *i_hi + 1;
    }
  }

  return;
}
+//****************************************************************************80
+
+void r8vec_indexed_heap_d ( int n, double a[], int indx[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_INDEXED_HEAP_D creates a descending heap from an indexed R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    An indexed R8VEC is an R8VEC of data values, and an R8VEC of N indices,
+//    each referencing an entry of the data vector.
+//
+//    The function adjusts the index vector INDX so that, for 0 <= J < N/2
+//    (0-based, matching the child formulas below), we have:
+//      A[INDX[2*J+1]]   <= A[INDX[J]]
+//    and
+//      A[INDX[2*J+2]] <= A[INDX[J]]
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 August 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Albert Nijenhuis, Herbert Wilf,
+//    Combinatorial Algorithms for Computers and Calculators,
+//    Academic Press, 1978,
+//    ISBN: 0-12-519260-6,
+//    LC: QA164.N54.
+//
+//  Parameters:
+//
+//    Input, int N, the size of the index array.
+//
+//    Input, double A[*], the data vector.
+//
+//    Input/output, int INDX[N], the index array.
+//    Each entry of INDX must be a valid index for the array A.
+//    On output, the indices have been reordered into a descending heap.
+//
+{
+  int i;
+  int ifree;
+  int key;
+  int m;
+//
+//  Only nodes N/2 - 1 down to 0 can be "parent" nodes.
+//
+  for ( i = ( n / 2 ) - 1; 0 <= i; i-- )
+  {
+//
+//  Copy the value out of the parent node.
+//  Position IFREE is now "open".
+//  Note that KEY is itself an index into A, so comparisons below use A[KEY].
+//
+    key = indx[i];
+    ifree = i;
+
+    for ( ; ; )
+    {
+//
+//  Positions 2*IFREE+1 and 2*IFREE+2 are the descendants of position
+//  IFREE.  (One or both may not exist because they exceed N-1.)
+//
+      m = 2 * ifree + 1;
+//
+//  Does the first position exist?
+//
+      if ( n - 1 < m )
+      {
+        break;
+      }
+//
+//  Does the second position exist?
+//
+      if ( m + 1 <= n - 1 )
+      {
+//
+//  If both positions exist, take the larger of the two values,
+//  and update M if necessary.
+//
+        if ( a[indx[m]] < a[indx[m+1]] )
+        {
+          m = m + 1;
+        }
+      }
+//
+//  If the large descendant is larger than KEY, move it up,
+//  and update IFREE, the location of the free position, and
+//  consider the descendants of THIS position.
+//
+      if ( a[indx[m]] <= a[key] )
+      {
+        break;
+      }
+
+      indx[ifree] = indx[m];
+      ifree = m;
+    }
+//
+//  Once there is no more shifting to do, KEY moves into the free spot IFREE.
+//
+    indx[ifree] = key;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+int r8vec_indexed_heap_d_extract ( int *n, double a[], int indx[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_INDEXED_HEAP_D_EXTRACT: extract from heap descending indexed R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    An indexed R8VEC is an R8VEC of data values, and an R8VEC of N indices,
+//    each referencing an entry of the data vector.
+//
+//    The routine finds the maximum value in the heap, returns that value to the
+//    user, deletes that value from the heap, and restores the heap to its
+//    proper form.
+//
+//    Note that the argument N must be a variable, which will be decremented
+//    before return, and that INDX will hold one less value on output than it
+//    held on input.
+//
+//    This is one of three functions needed to model a priority queue.
+//
+//    Calling this routine on an empty heap (*N < 1) terminates the program
+//    via std::exit.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 August 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Thomas Cormen, Charles Leiserson, Ronald Rivest,
+//    Introduction to Algorithms,
+//    MIT Press, 2001,
+//    ISBN: 0262032937,
+//    LC: QA76.C662.
+//
+//  Parameters:
+//
+//    Input/output, int *N, the number of items in the index vector.
+//
+//    Input, double A[*], the data vector.
+//
+//    Input/output, int INDX[N], the index vector.
+//
+//    Output, int R8VEC_INDEXED_HEAP_D_EXTRACT, the index in A of the item of
+//    maximum value, which has now been removed from the heap.
+//
+{
+  int indx_extract;
+
+  if ( *n < 1 )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8VEC_INDEXED_HEAP_D_EXTRACT - Fatal error!\n";
+    std::cerr << "  The heap is empty.\n";
+    std::exit ( 1 );
+  }
+//
+//  Get the index of the maximum value.
+//
+  indx_extract = indx[0];
+
+  if ( *n == 1 )
+  {
+    *n = 0;
+    return indx_extract;
+  }
+//
+//  Shift the last index down.
+//
+  indx[0] = indx[*n-1];
+//
+//  Restore the heap structure by re-heapifying the shortened index array.
+//
+  *n = *n - 1;
+  webbur::r8vec_indexed_heap_d ( *n, a, indx );
+
+  return indx_extract;
+}
+//****************************************************************************80
+
+void r8vec_indexed_heap_d_insert ( int *n, double a[], int indx[],
+  int indx_insert )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_INDEXED_HEAP_D_INSERT: insert value into heap descending indexed R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    An indexed R8VEC is an R8VEC of data values, and an R8VEC of N indices,
+//    each referencing an entry of the data vector.
+//
+//    Note that the argument N must be a variable, and will be incremented before
+//    return, and that INDX must be able to hold one more entry on output than
+//    it held on input.
+//
+//    This is one of three functions needed to model a priority queue.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 August 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Thomas Cormen, Charles Leiserson, Ronald Rivest,
+//    Introduction to Algorithms,
+//    MIT Press, 2001,
+//    ISBN: 0262032937,
+//    LC: QA76.C662.
+//
+//  Parameters:
+//
+//    Input/output, int *N, the number of items in the index vector.
+//
+//    Input, double A[*], the data vector.
+//
+//    Input/output, int INDX[N], the index vector.
+//
+//    Input, int INDX_INSERT, the index in A of the value
+//    to be inserted into the heap.
+//
+{
+  int i;
+  int parent;
+
+  *n = *n + 1;
+  i = *n - 1;
+//
+//  Sift the new entry up from the new last position toward the root,
+//  stopping as soon as the parent's value is at least as large.
+//
+  while ( 0 < i )
+  {
+    parent = ( i - 1 ) / 2;
+
+    if ( a[indx_insert] <= a[indx[parent]] )
+    {
+      break;
+    }
+
+    indx[i] = indx[parent];
+    i = parent;
+  }
+
+  indx[i] = indx_insert;
+
+  return;
+}
+//****************************************************************************80
+
+int r8vec_indexed_heap_d_max ( int n, double a[], int indx[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_INDEXED_HEAP_D_MAX: maximum value in heap descending indexed R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    An indexed R8VEC is an R8VEC of data values, and an R8VEC of N indices,
+//    each referencing an entry of the data vector.
+//
+//    This is one of three functions needed to model a priority queue.
+//
+//    NOTE(review): N and A are not referenced; the caller must ensure the
+//    heap is nonempty, since INDX[0] is read unconditionally.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    18 August 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Thomas Cormen, Charles Leiserson, Ronald Rivest,
+//    Introduction to Algorithms,
+//    MIT Press, 2001,
+//    ISBN: 0262032937,
+//    LC: QA76.C662.
+//
+//  Parameters:
+//
+//    Input, int N, the number of items in the index vector.
+//
+//    Input, double A[*], the data vector.
+//
+//    Input, int INDX[N], the index vector.
+//
+//    Output, int R8VEC_INDEXED_HEAP_D_MAX, the index in A of the maximum value
+//    in the heap.
+//
+{
+  int indx_max;
+//
+//  In a descending heap, the root holds the index of the maximum value.
+//
+  indx_max = indx[0];
+
+  return indx_max;
+}
+//****************************************************************************80
+
+double *r8vec_legendre_new ( int n, double a_first, double a_last )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_LEGENDRE_NEW creates a vector of Legendre spaced values.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    The zeros of the degree-N Legendre polynomial on [-1,1] are mapped
+//    linearly onto the interval [A_FIRST, A_LAST].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    17 June 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the vector.
+//
+//    Input, double A_FIRST, A_LAST, the first and last entries.
+//
+//    Output, double R8VEC_LEGENDRE_NEW[N], a vector of Legendre spaced data.
+//
+{
+  double *a;
+  int i;
+
+  a = webbur::legendre_zeros ( n );
+//
+//  Map each zero from [-1,1] to [A_FIRST, A_LAST], in place.
+//
+  for ( i = 0; i < n; i++ )
+  {
+    a[i] = ( ( 1.0 - a[i] ) * a_first
+           + ( 1.0 + a[i] ) * a_last )
+           /   2.0;
+  }
+  return a;
+}
+//****************************************************************************80
+
+double *r8vec_linspace_new ( int n, double a_first, double a_last )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_LINSPACE_NEW creates a vector of linearly spaced values.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    The caller owns the returned array and must release it with delete[].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    14 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the vector.
+//
+//    Input, double A_FIRST, A_LAST, the first and last entries.
+//
+//    Output, double R8VEC_LINSPACE_NEW[N], a vector of linearly spaced data.
+//
+{
+  double *a;
+  int i;
+
+  a = new double[n];
+
+  if ( n == 1 )
+  {
+//
+//  With a single entry, the convention is to return the interval midpoint.
+//
+    a[0] = ( a_first + a_last ) / 2.0;
+  }
+  else
+  {
+    for ( i = 0; i < n; i++ )
+    {
+      a[i] = ( ( double ) ( n - 1 - i ) * a_first
+             + ( double ) (         i ) * a_last )
+             / ( double ) ( n - 1     );
+    }
+  }
+  return a;
+}
+//****************************************************************************80
+
+double r8vec_min ( int n, double r8vec[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_MIN returns the value of the minimum element in an R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    02 July 2005
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the array.
+//
+//    Input, double R8VEC[N], the array to be checked.
+//
+//    Output, double R8VEC_MIN, the value of the minimum element.
+//
+{
+  int i;
+  double value;
+//
+//  NOTE(review): N is assumed to be at least 1; R8VEC[0] is read
+//  unconditionally.
+//
+  value = r8vec[0];
+
+  for ( i = 1; i < n; i++ )
+  {
+    if ( r8vec[i] < value )
+    {
+      value = r8vec[i];
+    }
+  }
+  return value;
+}
+//****************************************************************************80
+
+double r8vec_min_pos ( int n, double a[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_MIN_POS returns the minimum positive value of an R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    08 November 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries.
+//
+//    Input, double A[N], the array.
+//
+//    Output, double R8VEC_MIN_POS, the smallest positive entry,
+//    or R8_HUGE if no entry is positive.
+//
+{
+  int i;
+  double r8_huge = 1.0E+30;
+  double value;
+//
+//  Start at the sentinel R8_HUGE; it is returned unchanged if no entry
+//  is strictly positive.
+//
+  value = r8_huge;
+
+  for ( i = 0; i < n; i++ )
+  {
+    if ( 0.0 < a[i] )
+    {
+      if ( a[i] < value )
+      {
+        value = a[i];
+      }
+    }
+  }
+  return value;
+}
+//****************************************************************************80
+
+void r8vec_print ( int n, double a[], std::string title )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_PRINT prints an R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    The title is printed first, then one index/value pair per line,
+//    all on std::cout.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    16 August 2004
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of components of the vector.
+//
+//    Input, double A[N], the vector to be printed.
+//
+//    Input, string TITLE, a title.
+//
+{
+  int i;
+
+  std::cout << "\n";
+  std::cout << title << "\n";
+  std::cout << "\n";
+  for ( i = 0; i < n; i++ )
+  {
+    std::cout << "  " << std::setw(8)  << i
+              << ": " << std::setw(14) << a[i]  << "\n";
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void r8vec_scale ( double s, int n, double a[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_SCALE multiplies an R8VEC by a scale factor.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    The scaling is done in place.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    22 September 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, double S, the scale factor.
+//
+//    Input, int N, the number of entries in the vectors.
+//
+//    Input/output, double A[N], the vector to be scaled.
+//    On output, A[] = S * A[].
+//
+{
+  int i;
+
+  for ( i = 0; i < n; i++ )
+  {
+    a[i] = s * a[i];
+  }
+  return;
+}
+//****************************************************************************80
+
+void r8vec_sort_heap_index_a ( int n, double a[], int indx[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_SORT_HEAP_INDEX_A does an indexed heap ascending sort of an R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    The sorting is not actually carried out.  Rather an index array is
+//    created which defines the sorting.  This array may be used to sort
+//    or index the array, or to sort or index related arrays keyed on the
+//    original array.
+//
+//    Once the index array is computed, the sorting can be carried out
+//    "implicitly":
+//
+//      a(indx(*))
+//
+//    or explicitly, by the call
+//
+//      r8vec_permute ( n, indx, 0, a )
+//
+//    after which a(*) is sorted.
+//
+//    Note that the index vector is 0-based.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    02 October 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the array.
+//
+//    Input, double A[N], an array to be index-sorted.
+//
+//    Output, int INDX[N], contains the sort index.  The
+//    I-th element of the sorted array is A(INDX(I)).
+//
+{
+  double aval;
+  int i;
+  int indxt;
+  int ir;
+  int j;
+  int l;
+
+  if ( n < 1 )
+  {
+    return;
+  }
+
+  for ( i = 0; i < n; i++ )
+  {
+    indx[i] = i;
+  }
+
+  if ( n == 1 )
+  {
+    return;
+  }
+//
+//  Classic heapsort over the index array.  L counts down while the heap is
+//  being built; IR marks the end of the unsorted region.  L, IR, I and J
+//  are 1-based here (FORTRAN heritage); INDX entries remain 0-based.
+//
+  l = n / 2 + 1;
+  ir = n;
+
+  for ( ; ; )
+  {
+    if ( 1 < l )
+    {
+      l = l - 1;
+      indxt = indx[l-1];
+      aval = a[indxt];
+    }
+    else
+    {
+      indxt = indx[ir-1];
+      aval = a[indxt];
+      indx[ir-1] = indx[0];
+      ir = ir - 1;
+
+      if ( ir == 1 )
+      {
+        indx[0] = indxt;
+        break;
+      }
+    }
+//
+//  Sift AVAL down from position L.
+//
+    i = l;
+    j = l + l;
+
+    while ( j <= ir )
+    {
+      if ( j < ir )
+      {
+        if ( a[indx[j-1]] < a[indx[j]] )
+        {
+          j = j + 1;
+        }
+      }
+
+      if ( aval < a[indx[j-1]] )
+      {
+        indx[i-1] = indx[j-1];
+        i = j;
+        j = j + j;
+      }
+      else
+      {
+        j = ir + 1;
+      }
+    }
+    indx[i-1] = indxt;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+int *r8vec_sort_heap_index_a_new ( int n, double a[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_SORT_HEAP_INDEX_A_NEW does an indexed heap ascending sort of an R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    The sorting is not actually carried out.  Rather an index array is
+//    created which defines the sorting.  This array may be used to sort
+//    or index the array, or to sort or index related arrays keyed on the
+//    original array.
+//
+//    Once the index array is computed, the sorting can be carried out
+//    "implicitly":
+//
+//      a(indx(*))
+//
+//    or explicitly, by the call
+//
+//      r8vec_permute ( n, indx, 0, a )
+//
+//    after which a(*) is sorted.
+//
+//    Note that the index vector is 0-based.
+//
+//    The caller owns the returned array (delete[]); NULL is returned
+//    when N < 1.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    02 October 2010
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the array.
+//
+//    Input, double A[N], an array to be index-sorted.
+//
+//    Output, int R8VEC_SORT_HEAP_INDEX_A_NEW[N], contains the sort index.  The
+//    I-th element of the sorted array is A(INDX(I)).
+//
+{
+  double aval;
+  int i;
+  int *indx;
+  int indxt;
+  int ir;
+  int j;
+  int l;
+
+  if ( n < 1 )
+  {
+    return NULL;
+  }
+
+  indx = new int[n];
+
+  for ( i = 0; i < n; i++ )
+  {
+    indx[i] = i;
+  }
+
+  if ( n == 1 )
+  {
+    return indx;
+  }
+//
+//  Classic heapsort over the index array.  L counts down while the heap is
+//  being built; IR marks the end of the unsorted region.  L, IR, I and J
+//  are 1-based here (FORTRAN heritage); INDX entries remain 0-based.
+//
+  l = n / 2 + 1;
+  ir = n;
+
+  for ( ; ; )
+  {
+    if ( 1 < l )
+    {
+      l = l - 1;
+      indxt = indx[l-1];
+      aval = a[indxt];
+    }
+    else
+    {
+      indxt = indx[ir-1];
+      aval = a[indxt];
+      indx[ir-1] = indx[0];
+      ir = ir - 1;
+
+      if ( ir == 1 )
+      {
+        indx[0] = indxt;
+        break;
+      }
+    }
+//
+//  Sift AVAL down from position L.
+//
+    i = l;
+    j = l + l;
+
+    while ( j <= ir )
+    {
+      if ( j < ir )
+      {
+        if ( a[indx[j-1]] < a[indx[j]] )
+        {
+          j = j + 1;
+        }
+      }
+
+      if ( aval < a[indx[j-1]] )
+      {
+        indx[i-1] = indx[j-1];
+        i = j;
+        j = j + j;
+      }
+      else
+      {
+        j = ir + 1;
+      }
+    }
+    indx[i-1] = indxt;
+  }
+
+  return indx;
+}
+//****************************************************************************80
+
+void r8vec_stutter ( int n, double a[], int m, double am[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_STUTTER makes a "stuttering" copy of an R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    Applying a stuttering factor M of 3, the vector A = ( 1, 5, 8 ) becomes
+//    AM = ( 1, 1, 1, 5, 5, 5, 8, 8, 8 ).
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    28 March 2011
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the size of the input vector.
+//
+//    Input, double A[N], the vector.
+//
+//    Input, int M, the "stuttering factor".
+//
+//    Output, double AM[M*N], the stuttering vector.
+//
+{
+  int i;
+  int j;
+  int k;
+//
+//  K indexes the next open slot of AM.
+//
+  k = 0;
+  for ( i = 0; i < n; i++ )
+  {
+    for ( j = 0; j < m; j++ )
+    {
+      am[k] = a[i];
+      k = k + 1;
+    }
+  }
+  return;
+}
+//****************************************************************************80
+
+double r8vec_sum ( int n, double a[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_SUM returns the sum of an R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a double precision vector.
+//
+//    Returns 0.0 when N is zero or negative.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    15 October 2004
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the vector.
+//
+//    Input, double A[N], the vector.
+//
+//    Output, double R8VEC_SUM, the sum of the vector.
+//
+{
+  int i;
+  double value;
+//
+//  Simple sequential accumulation.
+//
+  value = 0.0;
+  for ( i = 0; i < n; i++ )
+  {
+    value = value + a[i];
+  }
+
+  return value;
+}
+//****************************************************************************80
+
+void r8vec_uniform_01 ( int n, int *seed, double r[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_UNIFORM_01 returns a unit pseudorandom R8VEC.
+//
+//  Discussion:
+//
+//    This routine implements the recursion
+//
+//      seed = ( 16807 * seed ) mod ( 2^31 - 1 )
+//      u = seed / ( 2^31 - 1 )
+//
+//    The integer arithmetic never requires more than 32 bits,
+//    including a sign bit.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 August 2004
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Paul Bratley, Bennett Fox, Linus Schrage,
+//    A Guide to Simulation,
+//    Second Edition,
+//    Springer, 1987,
+//    ISBN: 0387964673,
+//    LC: QA76.9.C65.B73.
+//
+//    Bennett Fox,
+//    Algorithm 647:
+//    Implementation and Relative Efficiency of Quasirandom
+//    Sequence Generators,
+//    ACM Transactions on Mathematical Software,
+//    Volume 12, Number 4, December 1986, pages 362-376.
+//
+//    Pierre L'Ecuyer,
+//    Random Number Generation,
+//    in Handbook of Simulation,
+//    edited by Jerry Banks,
+//    Wiley, 1998,
+//    ISBN: 0471134031,
+//    LC: T57.62.H37.
+//
+//    Peter Lewis, Allen Goodman, James Miller,
+//    A Pseudo-Random Number Generator for the System/360,
+//    IBM Systems Journal,
+//    Volume 8, Number 2, 1969, pages 136-143.
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the vector.
+//
+//    Input/output, int *SEED, a seed for the random number generator.
+//    SEED must be nonzero; a zero seed is a fatal error (std::exit).
+//
+//    Output, double R[N], the vector of pseudorandom values.
+//
+{
+  int i;
+  int i4_huge = 2147483647;
+  int k;
+
+  if ( *seed == 0 )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8VEC_UNIFORM_01 - Fatal error!\n";
+    std::cerr << "  Input value of SEED = 0.\n";
+    std::exit ( 1 );
+  }
+//
+//  Schrage's factorization: evaluates seed = ( 16807 * seed ) mod ( 2^31 - 1 )
+//  without overflowing 32-bit signed arithmetic (2^31 - 1 = 16807*127773+2836).
+//
+  for ( i = 0; i < n; i++ )
+  {
+    k = *seed / 127773;
+
+    *seed = 16807 * ( *seed - k * 127773 ) - k * 2836;
+
+    if ( *seed < 0 )
+    {
+      *seed = *seed + i4_huge;
+    }
+
+    r[i] = ( double ) ( *seed ) * 4.656612875E-10;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+double *r8vec_uniform_01_new ( int n, int *seed )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_UNIFORM_01_NEW returns a new unit pseudorandom R8VEC.
+//
+//  Discussion:
+//
+//    This routine implements the recursion
+//
+//      seed = ( 16807 * seed ) mod ( 2^31 - 1 )
+//      u = seed / ( 2^31 - 1 )
+//
+//    The integer arithmetic never requires more than 32 bits,
+//    including a sign bit.
+//
+//    The caller owns the returned array and must release it with delete[].
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 August 2004
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Reference:
+//
+//    Paul Bratley, Bennett Fox, Linus Schrage,
+//    A Guide to Simulation,
+//    Second Edition,
+//    Springer, 1987,
+//    ISBN: 0387964673,
+//    LC: QA76.9.C65.B73.
+//
+//    Bennett Fox,
+//    Algorithm 647:
+//    Implementation and Relative Efficiency of Quasirandom
+//    Sequence Generators,
+//    ACM Transactions on Mathematical Software,
+//    Volume 12, Number 4, December 1986, pages 362-376.
+//
+//    Pierre L'Ecuyer,
+//    Random Number Generation,
+//    in Handbook of Simulation,
+//    edited by Jerry Banks,
+//    Wiley, 1998,
+//    ISBN: 0471134031,
+//    LC: T57.62.H37.
+//
+//    Peter Lewis, Allen Goodman, James Miller,
+//    A Pseudo-Random Number Generator for the System/360,
+//    IBM Systems Journal,
+//    Volume 8, Number 2, 1969, pages 136-143.
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the vector.
+//
+//    Input/output, int *SEED, a seed for the random number generator.
+//    SEED must be nonzero; a zero seed is a fatal error (std::exit).
+//
+//    Output, double R8VEC_UNIFORM_01_NEW[N], the vector of pseudorandom values.
+//
+{
+  int i;
+  int i4_huge = 2147483647;
+  int k;
+  double *r;
+
+  if ( *seed == 0 )
+  {
+    std::cerr << "\n";
+    std::cerr << "R8VEC_UNIFORM_01_NEW - Fatal error!\n";
+    std::cerr << "  Input value of SEED = 0.\n";
+    std::exit ( 1 );
+  }
+
+  r = new double[n];
+//
+//  Schrage's factorization: evaluates seed = ( 16807 * seed ) mod ( 2^31 - 1 )
+//  without overflowing 32-bit signed arithmetic (2^31 - 1 = 16807*127773+2836).
+//
+  for ( i = 0; i < n; i++ )
+  {
+    k = *seed / 127773;
+
+    *seed = 16807 * ( *seed - k * 127773 ) - k * 2836;
+
+    if ( *seed < 0 )
+    {
+      *seed = *seed + i4_huge;
+    }
+
+    r[i] = ( double ) ( *seed ) * 4.656612875E-10;
+  }
+
+  return r;
+}
+//****************************************************************************80
+
+void r8vec_zero ( int n, double a[] )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    R8VEC_ZERO zeroes an R8VEC.
+//
+//  Discussion:
+//
+//    An R8VEC is a vector of R8's.
+//
+//    The vector is zeroed in place; A must hold at least N entries.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    03 July 2005
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int N, the number of entries in the vector.
+//
+//    Output, double A[N], a vector of zeroes.
+//
+{
+  int i;
+
+  for ( i = 0; i < n; i++ )
+  {
+    a[i] = 0.0;
+  }
+  return;
+}
+//****************************************************************************80
+
+void sort_heap_external ( int n, int *indx, int *i, int *j, int isgn )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    SORT_HEAP_EXTERNAL externally sorts a list of items into ascending order.
+//
+//  Discussion:
+//
+//    The actual list is not passed to the routine.  Hence it may
+//    consist of integers, reals, numbers, names, etc.  The user,
+//    after each return from the routine, will be asked to compare or
+//    interchange two items.
+//
+//    The current version of this code mimics the FORTRAN version,
+//    so the values of I and J, in particular, are FORTRAN indices.
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    05 February 2004
+//
+//  Author:
+//
+//    Original FORTRAN77 version by Albert Nijenhuis, Herbert Wilf.
+//    C++ version by John Burkardt
+//
+//  Reference:
+//
+//    Albert Nijenhuis, Herbert Wilf,
+//    Combinatorial Algorithms,
+//    Academic Press, 1978, second edition,
+//    ISBN 0-12-519260-6.
+//
+//  Parameters:
+//
+//    Input, int N, the length of the input list.
+//
+//    Input/output, int *INDX.
+//    The user must set INDX to 0 before the first call.
+//    On return,
+//      if INDX is greater than 0, the user must interchange
+//      items I and J and recall the routine.
+//      If INDX is less than 0, the user is to compare items I
+//      and J and return in ISGN a negative value if I is to
+//      precede J, and a positive value otherwise.
+//      If INDX is 0, the sorting is done.
+//
+//    Output, int *I, *J.  On return with INDX positive,
+//    elements I and J of the user's list should be
+//    interchanged.  On return with INDX negative, elements I
+//    and J are to be compared by the user.
+//
+//    Input, int ISGN. On return with INDX negative, the
+//    user should compare elements I and J of the list.  If
+//    item I is to precede item J, set ISGN negative,
+//    otherwise set ISGN positive.
+//
+{
+//
+//  State is kept in static variables between calls, so this routine is
+//  not reentrant: only one sort may be in progress at a time, and it must
+//  not be driven concurrently from multiple threads.
+//
+  static int i_save = 0;
+  static int j_save = 0;
+  static int k = 0;
+  static int k1 = 0;
+  static int n1 = 0;
+//
+//  INDX = 0: This is the first call.
+//
+  if ( *indx == 0 )
+  {
+
+    i_save = 0;
+    j_save = 0;
+    k = n / 2;
+    k1 = k;
+    n1 = n;
+  }
+//
+//  INDX < 0: The user is returning the results of a comparison.
+//
+  else if ( *indx < 0 )
+  {
+    if ( *indx == -2 )
+    {
+      if ( isgn < 0 )
+      {
+        i_save = i_save + 1;
+      }
+      j_save = k1;
+      k1 = i_save;
+      *indx = -1;
+      *i = i_save;
+      *j = j_save;
+      return;
+    }
+
+    if ( 0 < isgn )
+    {
+      *indx = 2;
+      *i = i_save;
+      *j = j_save;
+      return;
+    }
+
+    if ( k <= 1 )
+    {
+      if ( n1 == 1 )
+      {
+        i_save = 0;
+        j_save = 0;
+        *indx = 0;
+      }
+      else
+      {
+        i_save = n1;
+        j_save = 1;
+        n1 = n1 - 1;
+        *indx = 1;
+      }
+      *i = i_save;
+      *j = j_save;
+      return;
+    }
+    k = k - 1;
+    k1 = k;
+  }
+//
+//  0 < INDX: the user was asked to make an interchange.
+//
+  else if ( *indx == 1 )
+  {
+    k1 = k;
+  }
+
+  for ( ; ; )
+  {
+
+    i_save = 2 * k1;
+
+    if ( i_save == n1 )
+    {
+      j_save = k1;
+      k1 = i_save;
+      *indx = -1;
+      *i = i_save;
+      *j = j_save;
+      return;
+    }
+    else if ( i_save <= n1 )
+    {
+      j_save = i_save + 1;
+      *indx = -2;
+      *i = i_save;
+      *j = j_save;
+      return;
+    }
+
+    if ( k <= 1 )
+    {
+      break;
+    }
+
+    k = k - 1;
+    k1 = k;
+  }
+
+  if ( n1 == 1 )
+  {
+    i_save = 0;
+    j_save = 0;
+    *indx = 0;
+    *i = i_save;
+    *j = j_save;
+  }
+  else
+  {
+    i_save = n1;
+    j_save = 1;
+    n1 = n1 - 1;
+    *indx = 1;
+    *i = i_save;
+    *j = j_save;
+  }
+
+  return;
+}
+//****************************************************************************80
+
+void timestamp ( )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    TIMESTAMP prints the current YMDHMS date as a time stamp.
+//
+//  Example:
+//
+//    31 May 2001 09:45:54 AM
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    08 July 2009
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    None
+//
+{
+# define TIME_SIZE 40
+
+  static char time_buffer[TIME_SIZE];
+  const struct std::tm *tm_ptr;
+  size_t len;
+  std::time_t now;
+
+  now = std::time ( NULL );
+  tm_ptr = std::localtime ( &now );
+//
+//  NOTE(review): LEN is assigned but never used; a zero return from strftime
+//  (formatted string did not fit in TIME_SIZE) is silently ignored.
+//
+  len = std::strftime ( time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm_ptr );
+
+  std::cout << time_buffer << "\n";
+
+  return;
+# undef TIME_SIZE
+}
+//****************************************************************************80
+
+void vec_colex_next3 ( int dim_num, int base[], int a[], bool *more )
+
+//****************************************************************************80
+//
+//  Purpose:
+//
+//    VEC_COLEX_NEXT3 generates vectors in colex order.
+//
+//  Discussion:
+//
+//    The vectors are produced in colexical order, starting with
+//
+//    (1,        1,        ...,1),
+//    (2,        1,        ...,1),
+//     ...
+//    (BASE(1),  1,        ...,1)
+//
+//    (1,        2,        ...,1)
+//    (2,        2,        ...,1)
+//    ...
+//    (BASE(1),  2,        ...,1)
+//
+//    (1,        3,        ...,1)
+//    (2,        3,        ...,1)
+//    ...
+//    (BASE(1),  BASE(2), ...,BASE(DIM_NUM)).
+//
+//  Example:
+//
+//    DIM_NUM = 2,
+//    BASE = { 3, 3 }
+//
+//    1   1
+//    2   1
+//    3   1
+//    1   2
+//    2   2
+//    3   2
+//    1   3
+//    2   3
+//    3   3
+//
+//  Licensing:
+//
+//    This code is distributed under the GNU LGPL license.
+//
+//  Modified:
+//
+//    19 August 2008
+//
+//  Author:
+//
+//    John Burkardt
+//
+//  Parameters:
+//
+//    Input, int DIM_NUM, the spatial dimension.
+//
+//    Input, int BASE[DIM_NUM], the bases to be used in each dimension.
+//    In dimension I, entries will range from 1 to BASE[I].
+//
+//    Output, int A[DIM_NUM], the next vector.
+//
+//    Input/output, bool *MORE.  Set this variable false before
+//    the first call.  On return, MORE is TRUE if another vector has
+//    been computed.  If MORE is returned FALSE, ignore the output
+//    vector and stop calling the routine.
+//
+{
+  int i;
+
+  if ( !( *more ) )
+  {
+//
+//  First call: start from the all-ones vector.
+//
+    for ( i = 0; i < dim_num; i++ )
+    {
+      a[i] = 1;
+    }
+    *more = true;
+  }
+  else
+  {
+//
+//  Odometer-style increment: bump the lowest dimension; when an entry
+//  passes BASE[I], reset it to 1 and carry into the next dimension.
+//  If the carry propagates off the end, the sequence is exhausted.
+//
+    for ( i = 0; i < dim_num; i++ )
+    {
+      a[i] = a[i] + 1;
+
+      if ( a[i] <= base[i] )
+      {
+        return;
+      }
+      a[i] = 1;
+    }
+    *more = false;
+  }
+
+  return;
+}
+
+
+}
Index: /issm/trunk/externalpackages/dakota/configs/6.2/packages/DDACE/src/Analyzer/MainEffectsExcelOutput.cpp
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/packages/DDACE/src/Analyzer/MainEffectsExcelOutput.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/packages/DDACE/src/Analyzer/MainEffectsExcelOutput.cpp	(revision 24686)
@@ -0,0 +1,284 @@
+#include "MainEffectsExcelOutput.h"
+
+    std::ostringstream ss;
+
+    MainEffectsExcelOutput::MainEffectsExcelOutput(){;}
+
+    MainEffectsExcelOutput::~MainEffectsExcelOutput(){;}
+
+    std::string MainEffectsExcelOutput::outputColumnHeaders
+                    (int numInputs, int numOutputs) {
+
+        std::ostringstream ss;
+
+         /* input variables */
+         for (int i=0; i<numInputs; i++) {
+         	if (ss.str()!="") ss << ",";
+         	ss << "in(" << i << ")";
+         }
+
+         /* output variables */
+         for (int i=0; i<numOutputs; i++) {
+         	if (ss.str()!="") ss << ",";
+         	ss << "out(" << i << ")";
+         }
+
+         /* number of observations */
+         ss << ",nObservations";
+
+         /* sum of all observations */
+         ss << ",sumOfAllObservations";
+
+         /* average of all observations */
+         ss << ",avgOfAllObservation";
+
+         /* sum of squares of all observations */
+         ss << ",sumOfSquaresOfAllObservations";
+
+         /* degrees of freedom of all observations */
+         ss << ",degreesOfFreedomOfAllObservations";
+
+         /* variance of all Observations */
+         ss << ",varianceOfAllObservations";
+
+         /* sum */
+         ss << ",sum";
+
+         /* average */
+         ss << ",average";
+
+         /* sum of squares */
+         ss << ",sumOfSquares";
+
+         /* variance */
+         ss << ",variance";
+
+         /* sum of squares between groups */
+         ss << ",sumOfSquaresBetweenGroups";
+
+         /* degrees of freedom between groups */
+         ss << ",degreesOfFreedomBetweenGroups";
+
+         /* variance between groups */
+         ss << ",varianceBetweenGroups";
+
+         /* sum of squares within groups */
+         ss << ",sumOfSquaresWithinGroups";
+
+         /* degrees of freedom within groups */
+         ss << ",degreesOfFreedomWithinGroups";
+
+         /* variance within groups */
+         ss << ",varianceWithinGroups";
+
+         /* F */
+         ss << ",F";
+
+         ss << "\n";
+
+         return(ss.str());
+    }
+
+
+    std::string MainEffectsExcelOutput::outputMainEffects
+         (int inputVarIndex, int numInputs,
+          int outputVarIndex, int numOutputs,
+          DDaceMainEffects::Factor factor) {
+
+        std::ostringstream ss;
+        int numberOfLevels =
+            factor.getNumberOfLevels();
+
+        for (int i=0; i<numberOfLevels; i++) {
+        	ss << outputMainEffects (inputVarIndex, numInputs,
+                   outputVarIndex, numOutputs, factor, i);
+        }
+
+        return(ss.str());
+     }
+
+    std::string MainEffectsExcelOutput::outputMainEffects
+         (int inputVarIndex, int numInputs,
+          int outputVarIndex, int numOutputs,
+          DDaceMainEffects::Factor factor,
+          int indexOfInputValue) {
+
+        std::ostringstream ss;
+
+         /* put an F under the input variable */
+         for (int i=0; i<numInputs; i++) {
+         	if (ss.str()!="") ss << ",";
+         	if (i==inputVarIndex) ss << "F";
+         }
+
+         /* put an R under the output variable */
+         for (int i=0; i<numOutputs; i++) {
+         	if (ss.str()!="") ss << ",";
+         	if (i==outputVarIndex) ss << "R";
+         }
+
+         /* number of observations */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             ss << factor.getNumberOfObservations();
+         }
+
+
+         /* sum of all observations */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             DDaceMainEffects::Response response = factor.getResponse();
+             std::vector<double> responses = response.responses_;
+             ss << response.getSumPop();
+         }
+
+         /* average of all Observation */
+         ss << ",";
+         if (indexOfInputValue==0) {
+          	 DDaceMainEffects::Response response = factor.getResponse();
+             ss << response.getAveragePop();
+         }
+
+         /* sum of squares for Observation */
+         ss << ",";
+         if (indexOfInputValue==0) {
+          	 DDaceMainEffects::Response response = factor.getResponse();
+             ss << response.getSumOfSquaresPop();
+         }
+
+         /* degrees of freedom for all Observation*/
+         ss << ",";
+         if (indexOfInputValue==0) {
+             int dTotal = factor.getNumberOfObservations() - 1;
+             ss << dTotal;
+         }
+
+         /* variance for all Observations */
+         ss << ",";
+         if (indexOfInputValue==0) {
+         	DDaceMainEffects::Response response = factor.getResponse();
+             ss << response.getVariancePop();
+         }
+
+         /* sum */
+         ss << "," << factor.getLevelSum(indexOfInputValue);
+
+         /* average */
+         ss << "," << factor.getLevelAverage(indexOfInputValue);
+
+         /* sum of squares */
+         ss << "," << factor.getLevelSumOfSquares(indexOfInputValue);
+
+         /* variance */
+         ss << "," << factor.getLevelVariance(indexOfInputValue);
+
+         /* sum of squares between groups */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             ss << factor.sumOfSquaresBetweenGroups();
+         }
+
+         /* degrees of freedom between groups */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             ss << factor.doFBetween();
+         }
+
+         /* variance between groups */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             ss << factor.varianceBetweenGroups();
+         }
+
+         /* sum of squares within groups */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             ss << factor.sumOfSquaresWithinGroups();
+         }
+
+         /* degrees of freedom within groups */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             ss << factor.doFWithin();
+         }
+
+         /* variance within groups */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             ss << factor.varianceWithinGroups();
+         }
+
+         /* F */
+         ss << ",";
+         if (indexOfInputValue==0) {
+             ss << factor.Fdata();
+         }
+
+
+         ss << "\n";
+
+         return(ss.str());
+     }
+
+    std::string MainEffectsExcelOutput::computeExcelOutput
+        (std::vector<std::vector<double> > vectorInputData,
+         std::vector<std::vector<double> > vectorOutputData){
+
+        std::ostringstream ss;
+
+    	/* error check */
+    	if (vectorInputData.size() == 0) return("");
+    	if (vectorOutputData.size() == 0) return("");
+
+
+    	MainEffectsConverter converter;
+
+         /* Replace every INPUT data value with a counting number */
+         VectorCountingNumbersAndCount vectorCountingNumbersAndCount =
+             converter.convertAllDoublesToCountingNumbers(vectorInputData);
+         std::vector<std::vector<int> > vectorInputIndicies =
+                vectorCountingNumbersAndCount.vectorCountingNumbers;
+         int numberOfCountingNumbers = vectorCountingNumbersAndCount.count;
+
+        /* How many columns are in the input table? */
+        int numInputs = vectorInputData[0].size();
+
+        /* How many columns are in the output table? */
+        int numOutputs = vectorOutputData[0].size();
+
+        /* output the column headers */
+        ss << outputColumnHeaders (numInputs, numOutputs);
+
+        /* pair input column 1 with output column 1 */
+        /* pair input column 1 with output column 2 */
+        /* pair input column 1 with output column 3 */
+        /* etc.                                     */
+        /* pair input column 2 with output column 1 */
+        /* pair input column 2 with output column 2 */
+        /* pair input column 2 with output column 3 */
+        /* etc.                                     */
+        for (int indexInput=0; indexInput<numInputs; indexInput++) {
+        for (int indexOutput=0; indexOutput<numOutputs; indexOutput++) {
+
+             /* slice out the selected input var & selected output var */
+             DDaceMainEffects::Factor factor =
+                 converter.sliceOutOneInputVarAndOneOutputVar
+                      (vectorInputIndicies,    //data from all input vars
+                       vectorOutputData, //data from all output vars
+                       indexInput,             //slice out this input var
+                       indexOutput,            //slice out this output var
+                       numberOfCountingNumbers);  //# of different input values
+
+
+
+             ss << outputMainEffects (indexInput, numInputs, indexOutput,
+                    numOutputs, factor);
+	     std::cout << ss.str() << std::endl;
+
+
+        }//for indexOutput
+        }//for indexInput
+
+
+    	return(ss.str());
+    }
Index: /issm/trunk/externalpackages/dakota/configs/6.2/packages/surfpack/src/surfaces/nkm/NKM_KrigingModel.cpp
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/packages/surfpack/src/surfaces/nkm/NKM_KrigingModel.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/packages/surfpack/src/surfaces/nkm/NKM_KrigingModel.cpp	(revision 24686)
@@ -0,0 +1,4681 @@
+#include "NKM_SurfPack.hpp"
+#include "NKM_KrigingModel.hpp"
+//#include "Accel.hpp"
+//#include "NKM_LinearRegressionModel.hpp"
+#include <math.h>
+#include <iostream>
+#include <cfloat>
+
+
+#ifdef SURFPACK_HAVE_BOOST_SERIALIZATION
+BOOST_CLASS_EXPORT(nkm::KrigingModel)
+#endif
+
+
+namespace nkm {
+
+using std::cout;
+using std::cerr;
+using std::endl;
+using std::ostringstream;
+
+
+//#define __KRIG_ERR_CHECK__
+#define __NKM_UNBIASED_LIKE__
+
+
+
+// typical constructor
+KrigingModel::KrigingModel(const SurfData& sd, const ParamMap& params)
+  : SurfPackModel(sd,sd.getIOut()), numVarsr(sd.getNVarsr()),
+    numTheta(numVarsr), numPoints(sdBuild.getNPts()), XR(sdBuild.xr)
+{
+  //printf("calling the right KrigingModel constructor\n"); fflush(stdout);
+
+  //if the SurfDataScaler class does what it's supposed to (the only private content in sdBuild that a Model can access are the scaling bits and then only through SurfDataScaler, and only the model can see inside the scaler) the next line will cause an error when you try to compile with it uncommented, that is intentional
+  //printf("scaler->mySd.iout=%d\n",scaler.mySd.iout);
+
+  // OPTIONS PARSING
+  ParamMap::const_iterator param_it;
+
+  // *************************************************************
+  // control verbosity outputLevel
+  // *************************************************************
+  param_it = params.find("verbosity");
+  if (param_it != params.end() && param_it->second.size() > 0)
+    outputLevel=static_cast<short>(std::atoi(param_it->second.c_str()));
+  // outputLevel is a member of nkm::SurfPackModel which nkm::KrigingModel
+  // is derived from
+
+  // ********************************************************************
+  // does the user want to use derivative information to build the
+  // Kriging model (e.g. Gradient Enhanced Kriging)
+  // ********************************************************************
+
+  buildDerOrder=0; //default is regular Kriging (i.e. not GEK)
+  param_it = params.find("derivative_order");
+  if(param_it != params.end() && param_it->second.size() > 0)
+    buildDerOrder = std::atoi(param_it->second.c_str());
+  if(buildDerOrder==0) {//Kriging
+    numEqnAvail=numPoints;
+    nDer=1;
+    Der.newSize(numVarsr,nDer); Der.zero();
+  } else if(buildDerOrder==1) { //Gradient Enhanced Kriging (GEK)
+    numEqnAvail=(1+numVarsr)*numPoints;
+    multi_dim_poly_power(Der, numVarsr, 1);  //use all mixed partial
+    //derivatives, up to first order, of the basis functions
+    nDer=Der.getNCols(); //for GradKrigingModel nDer=(1+numVarsr);
+    //printf("nDer=%d\n",nDer);
+    int data_der_order=sdBuild.getDerOrder();
+    if(data_der_order<1) {
+      std::cerr << "the order of derivative information available in the "
+		<< "build data is " << data_der_order << "\n"
+		<< "You need to supply gradients of the output in order to "
+		<< "construct a\nGradient Enhanced Kriging (GEK) Model."
+		<< std::endl;
+      assert(false);
+    }
+  }
+  else{
+    std::cerr << "derivative_order=" << buildDerOrder
+	      << " in the nkm::KrigingModel constructor.\n"
+	      << "For Kriging you must use derivative_order=0.\n"
+	      << "For Gradient Enhanced Kriging (GEK) you must use "
+	      << "derivative_order=1.\nHigher order derivative "
+	      << "enhanced Kriging (e.g. Hessian Enhanced Kriging)\n"
+	      << "has not been implemented." << std::endl;
+    assert(false);
+  }
+
+  // *************************************************************
+  // detect an anchor point if present this is the one point that
+  // we make sure that the equationSelectingCholR does not discard
+  // *************************************************************
+  iAnchorPoint=0;
+  ifHaveAnchorPoint=false;
+  param_it = params.find("anchor_index");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    ifHaveAnchorPoint=true;
+    //printf("nkm::KrigingModel() sees an anchor point\n");
+    //fflush(stdout);
+    iAnchorPoint=std::atoi(param_it->second.c_str());
+    //printf("iAnchorPoint=%d\n",iAnchorPoint);
+    //fflush(stdout);
+    if(!((0<=iAnchorPoint)&&(iAnchorPoint<numPoints))) {
+      std::cerr << "You can't specify an anchor point that isn't one of "
+		<< "the build points" << std::endl;
+      assert(false);
+    }
+  }
+
+  // *************************************************************
+  // this starts the input section about scaling the data
+  // *************************************************************
+
+  MtxDbl min_max_xr(numVarsr, 2);
+  bool if_user_specified_lower_bounds=false;
+  param_it = params.find("lower_bounds");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    if_user_specified_lower_bounds=true;
+    if(min_max_xr.putCols(param_it->second,0)) {
+      std::cerr << "You didn't enter the right number of lower bounds"
+		<< std::endl;
+      assert(false);
+    }
+  }
+
+  bool if_user_specified_upper_bounds=false;
+  param_it = params.find("upper_bounds");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    if_user_specified_upper_bounds=true;
+    if(min_max_xr.putCols(param_it->second,1)) {
+      std::cerr << "You didn't enter the right number of upper bounds"
+		<< std::endl;
+      assert(false);
+    }
+  }
+
+  if(!(if_user_specified_lower_bounds==if_user_specified_upper_bounds)) {
+    std::cerr << "Your options are to\n(A) specify both the upper and lower, or\n(B) specify neither the upper nor lower,\nbounds of the domain of the Kriging Model\n";
+    assert(false);
+  }
+
+  if(if_user_specified_lower_bounds==true) {
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      if(!(min_max_xr(ixr,0)<=min_max_xr(ixr,1))) {
+	std::cerr << "The lower bound of the domain of the Kriging Model must be less than or equal to the upper bound of the domain of the Kriging Model\n";
+	assert(min_max_xr(ixr,0)<=min_max_xr(ixr,1));
+      }
+    //printf("lower_bounds = [%g",min_max_xr(0,0));
+    //for(int ixr=1; ixr<numVarsr; ++ixr)
+    //printf(", %g",min_max_xr(ixr,0));
+    //printf("]^T, upper_bounds = [%g",min_max_xr(0,1));
+    //for(int ixr=1; ixr<numVarsr; ++ixr)
+    //printf(", %g",min_max_xr(ixr,1));
+    //printf("]^T\n");
+    sdBuild.setUnscaledDomainSize(min_max_xr);
+  }
+
+  //printf("KrigingModel constructor should have just written out domain bounds\n");
+
+  param_it = params.find("dimension_groups");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    MtxInt dim_groups(numVarsr,1);
+    if(dim_groups.putCols(param_it->second,0)) {
+      std::cerr << "If you specify dimension_groups for any dimensions, "
+		<< "you must specify groups\nfor all dimensions. If you "
+		<< "don't want some of the dimensions to be grouped\n"
+		<< "with other dimensions during scaling, give each of "
+		<< "them their own group." << std::endl;
+      assert(false);
+    }
+    sdBuild.setDimGroups(dim_groups);
+  }
+
+  scaler.scaleToDefault(); //scale outputs to -0.5<=Y<=0.5 and scale
+  //real inputs to volume 1 hyper-rectangle centered at 0 if real
+  //input dimensions are locked or the unit hypercube centered at 0 if
+  //no dimensions are locked.  The scaling is done to let us define
+  //the feasible region simply (region is defined in create);
+
+  if(buildDerOrder==0) {
+    sdBuild.getY(Yall);
+    Yall.reshape(numPoints,1);
+  }
+  else if(buildDerOrder==1) {
+    sdBuild.getUpToDerY(Yall,1);
+    Yall.reshape(numEqnAvail,1);
+    //Yall is now a column vector that contains
+    //[y0, dy_0/dxr_0, ..., dy_0/dxr_{numVarsr-1}, y1, dy_1/dxr_0, ..., y2, ...
+    // y_{numPoints-1}, dy_{numPoints-1}/dxr_0, ...,
+    // dy_{numPoints-1}/dx_{numVarsr-1}]^T
+  }
+
+  // *************************************************************
+  // this starts the input section about optimizing or directly
+  // specifying correlation lengths, it must come after the
+  // scaling section
+  // *************************************************************
+
+  // current options are none (fixed correl) | sampling (guess) | local |
+  // global | global_local
+  optimizationMethod = "global"; //the default
+  //optimizationMethod = "none"; //the default
+  param_it = params.find("optimization_method");
+  if (param_it != params.end() && param_it->second.size() > 0)
+    optimizationMethod = param_it->second;
+
+  if(optimizationMethod.compare("none")==0)
+    maxTrials=1;
+  else if(optimizationMethod.compare("local")==0)
+    maxTrials=20;
+  else if(optimizationMethod.compare("sampling")==0)
+    maxTrials=2*numVarsr+1;
+  else if(optimizationMethod.compare("global")==0)
+    maxTrials = 10000;
+  else if(optimizationMethod.compare("global_local")==0) {
+    maxTrials = 10000; //ensure it has non-zero as a fail safe but this
+    //shouldn't be used
+    maxTrialsGlobal = 500;
+    maxTrialsLocal = 20;
+  }
+  else{ //error checking the input
+    std::cerr << "KrigingModel() unknown optimization_method [" << optimizationMethod << "]  aborting\n";
+    assert(false);
+  }
+
+  //std::cout << "optimization_method=\"" << optimizationMethod << "\"\n";
+
+  //numStarts is the number of starting locations in a multi-start local search
+  numStarts=1; //default is a single starting location
+  param_it = params.find("num_starts");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    numStarts = std::atoi(param_it->second.c_str());
+    if(numStarts<1) {
+      std::cerr << "You can't specify fewer than one starting location "
+		<< "for the optimization\nof correlation lenghts"
+		<< std::endl;
+      assert(false);
+    }
+  }
+
+  if(!((numStarts==1)||(optimizationMethod.compare("local")==0))) {
+    std::cerr << "Local optimization is the only optimization method for Kriging that uses the \"num_starts\" key word. Check your input file for errors.\n";
+    assert(false);
+  }
+
+  //std::cout << "num_starts=" << numStarts << "\n";
+
+
+  // does the user want to specify correlation lengths directly?
+  ifUserSpecifiedCorrLengths=false; //the default is no
+  param_it = params.find("correlation_lengths");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    ifUserSpecifiedCorrLengths=true;
+    //printf("User specifying correlation lengths\n"); fflush(stdout);
+
+    // make sure that the user didn't
+    // * say they want to global optimize __AND__
+    // * specify correlation lengths
+    if(optimizationMethod.compare("global")==0) {
+      std::cerr << "You can't both \n (A) use the global optimization method to choose, and \n (B) directly specify \n correlation lengths for the Kriging model.\n";
+      assert(false);
+    }
+    else if(optimizationMethod.compare("global_local")==0) {
+      //they can't coarse global followed by local either
+      std::cerr << "You can't both \n (A) use the coarse global polished by local optimization method to choose, and \n (B) directly specify \n correlation lengths for the Kriging model.\n";
+      assert(false);
+    }
+    else if(optimizationMethod.compare("sampling")==0) {
+      // this is only the default number of samples/maxTrials; the user can
+      // still overide this below
+      maxTrials+=1;
+    }
+
+    natLogCorrLen.newSize(numVarsr,1); //allocate space
+
+    //read the correlation lengths in from the string
+    if(natLogCorrLen.putCols(param_it->second,0)) {
+      std::cerr << "The specified correlation lengths had the wrong "
+		<< "number of input dimensions." << std::endl;
+      assert(false);
+    }
+    // "natLogCorrLen" currently holds the unscaled correlation LENGTHS, not
+    // the natural log of the scaled correlation length, we need to fix that
+    // but first we need to check the input for errors
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      if(!(natLogCorrLen(ixr,0)>0.0)) {
+	std::cerr << "For the Kriging Model, correlation lengths must be strictly positive\n.";
+	assert(false);
+      }
+
+    //printf("unscaled corr lens = [%12.6g",natLogCorrLen(0,0));
+    //for(int ixr=1; ixr<numVarsr; ++ixr)
+    //printf(", %12.6g",natLogCorrLen(ixr,0));
+    //printf("]\n");
+
+    scaler.scaleXrDist(natLogCorrLen); //scale the lengths
+    //scaler.scaleXrOther(natLogCorrLen); //error
+    //printf("scaled corr lens = [%12.6g",natLogCorrLen(0,0));
+    //for(int ixr=1; ixr<numVarsr; ++ixr)
+    // printf(", %12.6g",natLogCorrLen(ixr,0));
+    //printf("]\n");
+    //fflush(stdout);
+
+    //compute the natural log of the correlation lengths
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      natLogCorrLen(ixr,0)=std::log(natLogCorrLen(ixr,0));
+
+    natLogCorrLen.reshape(numVarsr,1);
+    //natLogCorrLen will be the first of the initial iterates (guesses), this happens in the create() function below
+  }
+  //printf("If user specified correlationlengths we should have just printed them\n");
+
+  // maximum objective evals for optimization or guess
+  param_it = params.find("max_trials");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    maxTrials = std::atoi(param_it->second.c_str());
+  }
+
+  if(!(maxTrials > 0)) {
+    std::cerr << "You can't specify a maximum number of trials that is "
+	      << "less than or equal\nto zero." << std::endl;
+    assert(false);
+  }
+
+  //printf("maxTrials=%d\n",maxTrials);
+
+
+  // *************************************************************
+  // this starts the input section about the trend function
+  // *************************************************************
+  polyOrderRequested = 2;
+  ifReducedPoly=false;
+  param_it = params.find("order");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    polyOrderRequested = std::atoi(param_it->second.c_str());
+    //std::cerr << "polyOrderRequested=" << polyOrderRequested << std::endl;
+    if(!(polyOrderRequested >= 0)) {
+      std::cerr << "You can't use a trend function with a polynomial "
+		<< "order less than zero." << std::endl;
+      assert(false);
+    }
+  }
+  else{
+    //if they don't specify a polynomial order use a main effects
+    //polynomial with order 2 for the trend function, (if they do
+    //specify a polynomial order assume they mean a full polynomial
+    //order unless they specify that it's a reduced_polynomial)
+    ifReducedPoly=true;
+  }
+  numTrend.newSize(polyOrderRequested+1,1);
+
+  //cout << "order=" << polyOrder << "\n";
+
+  //polyOrder = 2; //for debug
+  //main_effects_poly_power(Poly, numVarsr, polyOrder); //for debug
+  //commented out for debug
+
+  param_it = params.find("reduced_polynomial");
+  if (param_it != params.end() && param_it->second.size() > 0)
+    if((std::atoi(param_it->second.c_str()))!=0)
+      ifReducedPoly=true;
+
+  //cout << "ifReducedPoly=" << ifReducedPoly << "\n";
+
+  if(ifReducedPoly) {
+    for(polyOrder=0; polyOrder<=polyOrderRequested; ++polyOrder)
+      numTrend(polyOrder,0)=polyOrder*numVarsr+1;
+    main_effects_poly_power(Poly, numVarsr, polyOrderRequested);
+  }
+  else{
+    for(polyOrder=0; polyOrder<=polyOrderRequested; ++polyOrder)
+      numTrend(polyOrder,0)=num_multi_dim_poly_coef(numVarsr, polyOrder);
+    multi_dim_poly_power(Poly, numVarsr, polyOrderRequested);
+  }
+
+
+  // ********************************************************************
+  // this starts the section about the choice of correlation functions
+  // need to do build derivative order before this
+  // ********************************************************************
+  corrFunc=DEFAULT_CORR_FUNC;
+
+  //POW_EXP_CORR_FUNC
+  powExpCorrFuncPow=0.0; //only 1.0<=powExpCorrFunc<=2.0 are allowed
+  //later if corrFunc==POW_EXP_CORR_FUNC and powExpCorrFuncPow==0.0 we know
+  //we have an error
+  param_it = params.find("powered_exponential");
+  if(param_it != params.end() && param_it->second.size() > 0) {
+    if(corrFunc!=DEFAULT_CORR_FUNC) {
+      std::cerr << "You can only specify one correlation function\n";
+      assert(false);
+    }
+    corrFunc=POW_EXP_CORR_FUNC;
+    powExpCorrFuncPow=std::atof(param_it->second.c_str());
+    if(!((1.0<=powExpCorrFuncPow)&&(powExpCorrFuncPow<=2.0))){
+      std::cerr << "The powered exponential correlation function must have 1.0<=power<=2.0\n";
+      assert(false);
+    }
+    //need to require 1<powExpCorrFuncPow if first derivatives are used
+    //(otherwise no derivative is continuous at build points
+    //will need to require powExpCorrFuncPow==2 of 2nd or higher order
+    //derivatives are used
+    if(powExpCorrFuncPow==1.0)
+      corrFunc=EXP_CORR_FUNC;
+    else if(powExpCorrFuncPow==2.0)
+      corrFunc=GAUSSIAN_CORR_FUNC;
+  }
+
+  //MATERN_CORR_FUNC
+  maternCorrFuncNu=0.0; //only 0.5, 1.5, 2.5, and infinity will be allowed
+  //later if corrFunc==MATERN_CORR_FUNC and maternCorrFuncNu=0.0 we know
+  //we have an error
+  param_it = params.find("matern");
+  if(param_it != params.end() && param_it->second.size() > 0) {
+    if(corrFunc!=DEFAULT_CORR_FUNC) {
+      std::cerr << "You can only specify one correlation function\n";
+      assert(false);
+    }
+    if(param_it->second.compare("infinity")==0) {
+      corrFunc=GAUSSIAN_CORR_FUNC;
+      //matern nu=infinity is the Gaussian correlation function
+    }
+    else{
+      corrFunc=MATERN_CORR_FUNC;
+      maternCorrFuncNu=std::atof(param_it->second.c_str());
+      if(!((maternCorrFuncNu==0.5)||(maternCorrFuncNu==1.5)||
+	   (maternCorrFuncNu==2.5))) {
+	//could allow more later if 3rd+ order derivatives are enabled later
+	std::cerr << "For the Matern correlation function the only allowed values for nu are 0.5, 1.5, 2.5, and infinity\n";
+	assert(false);
+      }
+      if(maternCorrFuncNu==0.5) {
+	corrFunc=EXP_CORR_FUNC; //matern nu=0.5 is the exponential correlation function
+	//need to disallow maternCorrFuncNu=0.5 if gradients or higher order derivatives are used to construct the Kriging model
+      }
+      //need to disallow maternCorrFuncNu=1.5 if hessians or higher order derivatives are used to construct the Kriging model
+    }
+  }
+
+  if(corrFunc==DEFAULT_CORR_FUNC)
+    corrFunc=GAUSSIAN_CORR_FUNC;
+
+  // *************************************************************
+  // this starts the input section HOW to bound the condition
+  // number, this determines which derivatives of the constraint
+  // function can be computed analytically so handle that here too
+  // *************************************************************
+  //constraintType="rcond"; //rcond is now the only option for type of
+  //constraint against ill conditioning
+  numConFunc=1;
+
+  //convert to the Dakota bitflag convention for derivative orders
+  int num_analytic_obj_ders_in=0; //analytical derivatives have been removed
+  int num_analytic_con_ders_in=0; //analytical derivatives have been removed
+  maxObjDerMode=(static_cast<int>(std::pow(2.0,num_analytic_obj_ders_in+1)))-1; //analytical gradients of objective function
+  maxConDerMode=(static_cast<int> (std::pow(2.0,num_analytic_con_ders_in+1)))-1; //analytical gradients of constraint function(s)
+
+  maxCondNum=std::pow(1024.0,4);
+
+  // *************************************************************
+  // this starts the input section about the nugget which can be
+  // used to smooth the data and also decrease the condition
+  // number
+  // *************************************************************
+
+  ifChooseNug = false;
+  //ifChooseNug = true;
+  ifAssumeRcondZero=false;
+  param_it = params.find("find_nugget");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    ifChooseNug = true;
+    int zero_or_one = std::atoi(param_it->second.c_str());
+    if(zero_or_one==0)
+      ifAssumeRcondZero=true;
+  }
+  //ifChooseNug = true ; std::cout << "ifChooseNug=" << ifChooseNug << "\n";
+
+  // fixed value for now
+  nug = 0.0; //default
+  ifPrescribedNug=false;
+  param_it = params.find("nugget");
+  if (param_it != params.end() && param_it->second.size() > 0) {
+    if(!(ifChooseNug==false)) {
+      std::cerr << "You can do at most 1 of the following (A) auto-select "
+		<< "the nugget\n(approximately the minimum needed to "
+		<< "satisfy the condition number bound)\n(B) directly "
+		<< "specify a nugget.  The default is not to use a nugget "
+		<< "at all\n(i.e. use a nugget of zero)." << std::endl;
+      assert(false);
+    }
+    nug = std::atof(param_it->second.c_str());
+    if(!(nug >= 0.0)) {
+      std::cerr << "The nugget must be greater than or equal to zero."
+		<< std::endl;
+      assert(false);
+    }
+    ifPrescribedNug=true;
+  }
+
+  // *************************************************************
+  // this ends the input parsing now finish up the prep work
+  // *************************************************************
+  preAllocateMaxMemory(); //so we don't have to constantly dynamically
+  //allocate, the SurfMat class can use a subset of the allocated memory
+  //without using dynamic reallocation
+
+  // precompute and store the trend function for all build points
+  if(buildDerOrder==0) //for Kriging
+    eval_trend_fn(Gall, XR);
+  else if(buildDerOrder>=1) { //for GEK
+    //actually this is generic to higher order derivative enhanced Kriging
+    //(e.g. Hessian Enhanced Kriging) provided that Der is appropriately
+    //defined
+    eval_der_trend_fn(Gall, Der, XR);
+  } else{
+    std::cerr << "bogus buildDerOrder=" << buildDerOrder
+	      << " in the constructor when evaluating Gall" << std::endl;
+    assert(false);
+  }
+
+  if((ifChooseNug==true)||(ifPrescribedNug==true)) {
+    //if we're using a nugget then we aren't using pivoted cholesky to
+    //select an optimal subset of points, that means that the order of
+    //points aren't going to change so we'll set Y and Gtran to what
+    //we know they need to be
+    iPtsKeep.newSize(numPoints,1);
+    for(int ipt=0; ipt<numPoints; ++ipt)
+      iPtsKeep(ipt,0)=ipt;
+
+    Y.copy(Yall);
+
+    //polyOrder=polyOrderRequested;
+    nTrend=numTrend(polyOrderRequested,0);
+    Gtran.newSize(numEqnAvail,nTrend);
+    for(int itrend=0; itrend<nTrend; ++itrend)
+      for(int i=0; i<numEqnAvail; ++i)
+	Gtran(i,itrend)=Gall(itrend,i);
+
+    if(buildDerOrder==0)
+      numExtraDerKeep=0;
+    else if(buildDerOrder==1)
+      numExtraDerKeep=numVarsr;
+    else{
+      std::cerr << "buildDerOrder=" << buildDerOrder
+		<< " in void KrigingModel::nuggetSelectingCholR(); "
+		<< "for Kriging buildDerOrder must be 0; "
+		<< "for Gradient Enhanced Kriging buildDerOrder must be 1; "
+		<< "Higher order derivative enhanced Kriging "
+		<< "(e.g Hessian Enhanced Kriging) has not been implemented"
+		<< std::endl;
+      assert(false);
+    }
+    numPointsKeep=numPoints;
+    numRowsR=numEqnAvail;
+  }
+
+  gen_Z_matrix();  //initializes deltaXR and Z matrices (what the Z
+  // matrix contains depends on the choice of correlation function but
+  // as of 2012.06.25 all correlation functions involve an
+  // coefficient*exp(Z^T*theta) (reshaped to the lower triangular part of R)
+  // for the powered Exponential family of correlation functions that
+  // coefficient is 1.0, for Matern 1.5 and Matern 2.5 correlation functions
+  // it's not 1.0 and we only do the multiplication when the coefficient isn't
+  // 1.0 to save computation.
+
+  //printf("completed the KrigingModel constructor\n"); fflush(stdout);
+}
+
+// Fit the Gaussian-process hyperparameters: set up bounds and initial
+// guesses in log(correlation length) space, run the user-selected
+// optimizer, convert the winning point to correlation parameters (theta),
+// then finalize the retained trend polynomial, build a reordered copy of
+// the kept build points for fast evaluation, and release scratch storage.
+void KrigingModel::create()
+{
+  //printf("entered create()\n"); fflush(stdout);
+
+  prevObjDerMode=prevConDerMode=0; //tells us not to reuse previous work used
+  //to calculate the objective, constraints and their derivatives the first
+  //time they are asked for
+  prevTheta.newSize(numTheta,1);
+  prevTheta.zero(); //not necessary just useful to debug
+
+  //printf("KM.create():1: nug=%g\n",nug);
+
+  // -
+  // solve the optimization problem for the correlations
+  // -
+
+  //printf("numVarsr=%d\n",numVarsr); fflush(stdout);
+  OptimizationProblem opt(*this, numVarsr, numConFunc);
+
+
+  // set the bounds for the plausible region for correlation lengths
+  // (assumes input space has a volume of 1, and data points are
+  // uniformly distributed)
+  aveDistBetweenPts=std::pow(numPoints,-1.0/numVarsr);
+
+  // note: we should explore different bounds on the correlation lengths
+  // for different choices of the correlation function, but this has not
+  // been done yet, the "procedure" for determining how far the lengths
+  // should extend is to say that X% probability mass is located a
+  // certain distance away (5% at 16 neighbors for the upper bound, and
+  // for the lower bound you want the nearest neighbor to be
+  // "essentially uncorrelated" while halfway between nearest neighbors
+  // is slightly correlated)
+
+  // For the maximum correlation length = aveDistBetweenPts*8.0
+  // the Gaussian Correlation function (the original one this GP a.k.a.
+  // Kriging model was developed for) has about ~5% confidence (2 std
+  // devs away) in what points 16 neighbors away have to say. If points
+  // are correlated well at even greater distances then either
+  // * that same information will be contained in nearby points OR
+  // * you shouldn't be using a Gaussian process error model
+  double max_corr_length = aveDistBetweenPts*8.0;
+  maxNatLogCorrLen=std::log(max_corr_length);
+
+  // For the minimum correlation length = aveDistBetweenPts/4.0
+  // the Gaussian Correlation function (the original one this GP a.k.a.
+  // Kriging model was developed for) has about ~5% confidence (2 std
+  // midway between neighboring points... i.e. you're 4 std devs away
+  // from your nearest neighbor so all sample points are treated as being
+  // essentially uncorrelated
+  double min_corr_length = aveDistBetweenPts/4.0;
+  minNatLogCorrLen=std::log(min_corr_length);
+
+  //Choose dead center (in log(correlation length)) of the feasible region
+  //as the default initial guess for the Gaussian Process error model
+  double init_guess=0.5*(maxNatLogCorrLen+minNatLogCorrLen);
+
+  ///set the bounds and the initial iterates
+  if(ifUserSpecifiedCorrLengths==true) {
+    // the first guess is what the user told us he/she wanted to use
+    for(int ixr=0; ixr<numVarsr; ++ixr) {
+      opt.lower_bound(ixr, minNatLogCorrLen);
+      opt.upper_bound(ixr, maxNatLogCorrLen);
+      opt.initial_iterate(ixr, natLogCorrLen(ixr,0));
+    }
+    // the second guess is the center of the small feasible region
+    MtxDbl the_second_guess(numVarsr,1);
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      the_second_guess(ixr,0)=init_guess;
+    opt.add_initial_iterates(the_second_guess);
+  } else {
+
+    // since the user didn't specify an initial guess we will use the center
+    // of the small feasible region as our first initial guess
+    for(int ixr=0; ixr< numVarsr; ++ixr) {
+      opt.lower_bound(ixr, minNatLogCorrLen);
+      opt.upper_bound(ixr, maxNatLogCorrLen);
+      opt.initial_iterate(ixr, init_guess);
+    }
+  }
+
+  // add a binning optimal (read as space filling) random design with
+  // 2*numVars more guesses
+  // bins are the endpoints of a randomly rotated axis
+  MtxDbl axes_of_guesses(numVarsr,2*numVarsr);
+  gen_rand_axis_bin_opt_samples_0to1(axes_of_guesses,numVarsr);
+  for(int iguess=0; iguess<2*numVarsr; ++iguess) {
+    for(int ixr=0; ixr<numVarsr; ++ixr) {
+      //map each [0,1] sample into [minNatLogCorrLen,maxNatLogCorrLen]
+      axes_of_guesses(ixr,iguess)=(maxNatLogCorrLen-minNatLogCorrLen)*axes_of_guesses(ixr,iguess)+minNatLogCorrLen;
+    }
+  }
+  opt.add_initial_iterates(axes_of_guesses);
+
+  //choose the optimizer you want to use
+  if(optimizationMethod.compare("none")==0) {
+    //"none" = no optimization: just accept the first initial iterate
+    natLogCorrLen.resize(numVarsr,1);
+    opt.retrieve_initial_iterate(0,natLogCorrLen);
+  }
+  else{
+    if(optimizationMethod.compare("local")==0) {
+      //local optimization
+      if(numStarts==1)
+	//from a single starting location
+	opt.conmin_optimize();
+      else{
+	//doing multi-start local optimization
+	opt.multistart_conmin_optimize(numStarts);
+      }
+    }
+    else if(optimizationMethod.compare("global")==0)
+      //global optimization via the "DIvision of RECTangles" method
+      opt.direct_optimize();
+    else if(optimizationMethod.compare("sampling")==0)
+      //randomly generate candidates and pick the best guess
+      opt.best_guess_optimize(maxTrials);
+    else if(optimizationMethod.compare("global_local")==0){
+      //a coarse global optimization that is polished by
+      //local optimization
+      maxTrials=maxTrialsGlobal;
+      opt.direct_optimize();
+      natLogCorrLen = opt.best_point();
+      maxTrials=maxTrialsLocal;
+      opt.conmin_optimize();
+    }
+    else{
+      std::cerr << "KrigingModel:create() unknown optimization_method [" << optimizationMethod << "]\naborting" << std::endl;
+      assert(false);
+    }
+    natLogCorrLen = opt.best_point();
+  }
+
+  //convert the winning log(correlation length) point to theta
+  MtxDbl corr_len(numVarsr,1);
+  for(int ixr=0; ixr<numVarsr; ++ixr)
+    corr_len(ixr,0)=std::exp(natLogCorrLen(ixr,0));
+  correlations.newSize(numVarsr,1);
+  get_theta_from_corr_len(correlations,corr_len);
+
+  //printf("}\n");
+
+  //printf("scaled correlations=[%12.6g",correlations(0,0));
+  //for(int ixr=1; ixr<numVarsr; ++ixr)
+  //printf(", %12.6g",correlations(ixr,0));
+  //printf("]\n");
+
+  //final evaluation at the chosen correlations; this also populates the
+  //fit state (e.g. nTrend, iTrendKeep, betaHat) consumed below
+  masterObjectiveAndConstraints(correlations, 1, 0);
+
+  //keep only the "optimal" subset of trend basis function in Poly that was
+  //selected by the pivoted Cholesky factorization of G*R^-1*G^
+  if(nTrend<numTrend(polyOrderRequested,0)) {
+    //for this to work, the basis function indices in iTrendKeep must
+    //be in monotonically increasing order
+
+    //we are guaranteed to keep the constant term of the trend function so
+    //start loop from 1 not zero
+    for(int itrend=1; itrend<nTrend; ++itrend) {
+      int isrc=iTrendKeep(itrend,0);
+      if(itrend<isrc)
+	for(int ixr=0; ixr<numVarsr; ++ixr)
+	   Poly(ixr,itrend)=Poly(ixr,isrc);
+    }
+
+    //now reduce the size of Poly
+    Poly.resize(numVarsr,nTrend);
+  }
+
+  //determine the maximum total order of any term in the part of the
+  //trend that was retained
+  polyOrder=Poly(0,nTrend-1);
+  for(int ixr=1; ixr<numVarsr; ++ixr)
+    polyOrder+=Poly(ixr,nTrend-1);
+
+
+
+  //make a reordered copy of (the retained portion of) XR for evaluation speed
+  XRreorder.newSize(numVarsr,numPointsKeep);
+  for(int ipt=0; ipt<numPointsKeep; ++ipt) {
+    int isrc=iPtsKeep(ipt,0);
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      XRreorder(ixr,ipt)=XR(ixr,isrc);
+  }
+
+  if(outputLevel >= NORMAL_OUTPUT) {
+    std::cout << model_summary_string();
+    //std::cout << std::endl;
+  }
+
+
+  //variables whose values needed to be retained between sequential call to masterObjectiveAndConstraints for precompute and store strategy to work
+  prevObjDerMode=prevConDerMode=0;
+
+  //deallocate matrices we no longer need after emulator has been created
+  //these were made member variables (instead of local variables) to avoid
+  //the cost of dynamic allocation and deallocation each cycle of the
+  //optimization of the correlation parameters
+  scaleRChol.clear(); //matrix
+  sumAbsColR.clear(); //vector
+  oneNormR.clear(); //vector
+  lapackRcondR.clear(); //vector
+  rcondDblWork.clear();  //vector
+  rcondIntWork.clear(); //vector
+  Yall.clear(); //vector
+  Gall.clear(); //matrix
+  Gtran.clear(); //matrix
+  iTrendKeep.clear(); //vector
+  Z.clear(); //matrix
+  Ztran_theta.clear(); //vector
+  deltaXR.clear(); //matrix
+  R.clear(); //matrix
+  G_Rinv_Gtran.clear(); //matrix
+  G_Rinv_Gtran_Chol_Scale.clear(); //vector
+  G_Rinv_Gtran_Chol_DblWork.clear(); //vector
+  G_Rinv_Gtran_Chol_IntWork.clear(); //vector
+  G_Rinv_Y.clear(); //vector
+  eps.clear(); //vector
+  prevTheta.clear(); //vector
+  con.clear(); //vector
+}
+
+// Return a human-readable name for the currently selected correlation
+// function (corrFunc), including its parameter for the powered exponential
+// and Matern families.  Aborts via assert on an unrecognized enum value.
+std::string KrigingModel::get_corr_func() const {
+  std::ostringstream oss;
+
+  switch(corrFunc) {
+  case GAUSSIAN_CORR_FUNC:
+    oss << "Gaussian";
+    break;
+  case EXP_CORR_FUNC:
+    oss << "exponential";
+    break;
+  case POW_EXP_CORR_FUNC:
+    oss << "powered exponential with power=" << powExpCorrFuncPow;
+    break;
+  case MATERN_CORR_FUNC:
+    //maternCorrFuncNu is a half-integer (e.g. 1.5) printed as "3/2"
+    oss << "Matern " << static_cast<int>(maternCorrFuncNu*2.0) << "/2";
+    break;
+  default:
+    std::cerr << "unknown correlation function enumerated as " << corrFunc
+	      << std::endl;
+    assert(false);
+  }
+  return (oss.str());
+}
+
+
+// Build a multi-line diagnostic summary of the fitted model: number of
+// points/equations used, correlation function and (unscaled) correlation
+// lengths, variance/likelihood/rcond diagnostics, nugget, and a printout
+// of the retained trend polynomial in SCALED coordinates.  Read-only;
+// formatting of the returned text is relied on by users/logs, so the
+// string literals below must not be altered.
+std::string KrigingModel::model_summary_string() const {
+  //report correlation lengths in the user's (unscaled) units
+  MtxDbl temp_out_corr_lengths(numVarsr,1);
+  get_corr_len_from_theta(temp_out_corr_lengths,correlations);
+  scaler.unScaleXrDist(temp_out_corr_lengths);
+
+  //printf("numPoints=%d numTrend=%d numPointsKeep=%d numWholePointsKeep=%d numExtraDerKeep=%d\n",numPoints,numTrend(polyOrder,0),numPointsKeep,numWholePointsKeep,numExtraDerKeep);
+
+  std::ostringstream oss;
+  oss << "--- Surfpack Kriging Diagnostics ---\n";
+  if(buildDerOrder==0)
+    oss << "KM: #real inputs=" << numVarsr << "; #pts=" << numPoints
+	<< "; used " << numPointsKeep << "/" << numPoints << " pts;\n";
+  else if(buildDerOrder==1)
+    oss << "GEK: #real inputs=" << numVarsr << "; #pts=" << numPoints
+	<< "; #eqns=" << numEqnAvail << "; used "
+	<< numRowsR << "/" << numEqnAvail << " eqns;\n";
+  else{
+    oss << "error std::string KrigingModel::model_summary_string() const\n"
+	<< "buildDerOrder=" << buildDerOrder << "; it should be 0 for Kriging"
+	<< " or 1 for Gradient Enhanced Kriging (GEK);"
+	<< " the model_summary_string() function will need to be modified "
+	<< "to handle other build derivative orders.\n";
+  }
+  oss << "using the ";
+  if(corrFunc==GAUSSIAN_CORR_FUNC)
+    oss << "Gaussian";
+  else if(corrFunc==EXP_CORR_FUNC)
+    oss << "exponential";
+  else if(corrFunc==POW_EXP_CORR_FUNC)
+    oss << "powered exponential (with power = " << powExpCorrFuncPow << ")";
+  else if(corrFunc==MATERN_CORR_FUNC)
+    oss << "Matern " << maternCorrFuncNu;
+  else{
+    std::cerr << "unknown corr func in model_summary_string()" << std::endl;
+    assert(false);
+  }
+  oss << " correlation function with (unscaled)\n"
+      << "Correlation lengths=[" << temp_out_corr_lengths(0,0);
+  for(int ixr=1; ixr<numVarsr; ++ixr)
+    oss << ", " << temp_out_corr_lengths(ixr,0);
+  oss << "]^T\nfound by the \"" << optimizationMethod
+      << "\" optimization_method;\nunadjusted variance="
+      << estVarianceMLE * scaler.unScaleFactorVarY()
+      << "; \"per equation\" log(likelihood)=" << likelihood << ";\n"
+      << "rcond(R)=" << rcondR << "; rcond(G_Rinv_Gtran)="
+      << rcond_G_Rinv_Gtran << "; [if either rcond is less\n"
+      << "than 2^-40 (approx 9.095*10^-13) then the matrix is ill-conditioned "
+      << "and\nthat \"voids the warranty\" of the Kriging Model]; nugget="
+      << nug << ".  A ";
+  if(polyOrder>1) {
+    if(ifReducedPoly==true)
+      oss << "reduced_";
+    else oss <<"full ";
+  }
+  oss << "polynomial\nof order " << polyOrderRequested << " (with "
+      << numTrend(polyOrderRequested,0) << " terms) was requested "
+      << "for the trend function; the build\ndata was ";
+  if(nTrend<numTrend(polyOrderRequested,0) )
+    oss << "NOT ";
+  oss << "sufficient to use the requested trend function; "
+      << "the highest total\npolynomial order of any term in the "
+      << "utlized trend function is " << polyOrder << ";\n"
+      << "for SCALED inputs and outputs the utilized trend function is\n"
+      << "betaHat^T*g(x)=";
+  //print the trend as "coef*x0^p0*x1^p1 + ...", three terms per line
+  int nterm_on_this_line=0;
+  for(int itrend=0; itrend<nTrend; ++itrend) {
+    ++nterm_on_this_line;
+    oss << betaHat(itrend,0);
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      if(Poly(ixr,itrend)>0) {
+	oss << "*x" << ixr;
+	if(Poly(ixr,itrend)>1)
+	  oss << "^" << Poly(ixr,itrend);
+      }
+    if(itrend<nTrend-1) {
+      oss << " ";
+      //only print "+ " when the next coefficient won't print its own '-'
+      if(betaHat(itrend+1,0)>=0.0)
+	oss << "+ ";
+      if(nterm_on_this_line==3) {
+	oss << "...\n               ";
+	nterm_on_this_line=0;
+      }
+    }
+  }
+  oss << "\n------------------------------------\n";
+  return (oss.str());
+}
+
+// Preallocate every matrix/vector whose final size depends on how many
+// equations pivoted Cholesky ends up discarding, at its MAXIMUM possible
+// size; this lets the SurfMat class reuse subsets of this memory during
+// the correlation-parameter optimization without dynamic reallocation.
+void KrigingModel::preAllocateMaxMemory() {
+  //this preallocates the maximum size of arrays whose size depends on how
+  //many equations were discarded by pivoted Cholesky and they could
+  //possibly be allocated to a different size than their maximum the first
+  //time they are allocated.
+
+  nTrend=numTrend(polyOrderRequested,0);
+  Y.newSize(numEqnAvail,1);
+  Gtran.newSize(numEqnAvail,nTrend);
+  Rinv_Gtran.newSize(numEqnAvail,nTrend);
+  G_Rinv_Gtran.newSize(nTrend,nTrend);
+  G_Rinv_Gtran_Chol.newSize(nTrend,nTrend);
+  rhs.newSize(numEqnAvail,1);
+  betaHat.newSize(nTrend,1);
+  G_Rinv_Y.newSize(nTrend,1);
+  eps.newSize(numEqnAvail,1);
+  iPtsKeep.newSize(numPoints,1);
+  RChol.newSize(numEqnAvail,numEqnAvail);
+  //scratch for rcond estimation is sized numEqnAvail only when pivoted
+  //Cholesky point selection is in play (no nugget) and can help
+  int nrows = nTrend;
+  if((ifChooseNug==false)&&(ifPrescribedNug==false)&&(numEqnAvail>nTrend))
+    nrows=numEqnAvail;
+  scaleRChol.newSize(nrows,3);
+  lapackRcondR.newSize(nrows,1);
+
+  return;
+}
+
+// BMA TODO: combine these two functions?
+
+/// evaluate (y) the Kriging Model at a single point (xr)
+/// In:  xr = numVarsr x 1 column vector (one evaluation point)
+/// Out: returns y(xr) in UNSCALED units:
+///      y = betaHat^T*g(xr) + rhs^T*r(xr), where rhs=R^-1*(Y-G^T*betaHat)
+double KrigingModel::evaluate(const MtxDbl& xr)
+{
+  if(buildDerOrder==0) {
+    //you wouldn't want to do this for Gradient Enhanced Kriging
+    //i.e. if gradients of y were used as inputs
+    double singular_y;
+    if(scaler.isYSingular(0,singular_y))
+      return singular_y; //constant response: shortcut, no linear algebra
+  }
+
+  //assert( (numVarsr == xr.getNCols()) && (xr.getNRows() == 1) );
+  MtxDbl g(nTrend,1), r(numRowsR,1);
+
+  /*
+  printf("double evaluate()\n");
+  printf("xr=[%20.14g", xr(0,0));
+  for(int ixr=1; ixr<numVarsr; ++ixr)
+    printf(", %20.14g",xr(ixr,0));
+    printf("]^T\n");
+  */
+
+  //evaluate the trend basis g and correlation vector r, in the model's
+  //scaled coordinate system
+  if(scaler.isUnScaled()) {
+    eval_trend_fn(g, xr);
+    correlation_matrix(r, xr);
+  }
+  else{
+    MtxDbl xr_scaled(xr);
+    scaler.scaleXrOther(xr_scaled);
+    eval_trend_fn(g, xr_scaled);
+    correlation_matrix(r, xr_scaled);
+  }
+
+  double y = dot_product(g, betaHat) + dot_product(r, rhs);
+
+  //double yus=scaler.unScaleYOther(y);
+  //printf("y=%g yunscaled=%g\n",y,yus);
+  //return yus;
+
+  return (scaler.unScaleYOther(y));
+}
+
+
+/// evaluate (y) the Kriging Model at a collection of points (xr)
+/// In:  xr = numVarsr x npts matrix (one evaluation point per column)
+/// Out: y  = 1 x npts row vector of UNSCALED predictions (also returned);
+///      y = betaHat^T*g + rhs^T*r computed with BLAS-style matrix_mult
+MtxDbl& KrigingModel::evaluate(MtxDbl& y, const MtxDbl& xr)
+{
+  int nptsxr=xr.getNCols();
+  //printf("nptsxr=%d nvarsrxr=%d",nptsxr,xr.getNCols());
+
+  y.newSize(1,nptsxr);
+  if(buildDerOrder==0) {
+    //you wouldn't want to do this for Gradient Enhanced Kriging
+    //i.e. if gradients of y were used as inputs
+    double singular_y;
+    if(scaler.isYSingular(0,singular_y)) {
+      //constant response: fill the answer directly, no linear algebra
+      for(int ipt=0; ipt<nptsxr; ++ipt)
+	y(0,ipt)=singular_y;
+      return y;
+    }
+  }
+  //assert(numVarsr == xr.getNRows());
+  MtxDbl g(nTrend, nptsxr), r(numRowsR, nptsxr);
+
+  //evaluate the trend basis g and correlation matrix r in the model's
+  //scaled coordinate system
+  if(scaler.isUnScaled()) {
+    eval_trend_fn(g, xr);
+    correlation_matrix(r, xr);
+  }
+  else{
+    MtxDbl xr_scaled(xr);
+    scaler.scaleXrOther(xr_scaled);
+    eval_trend_fn(g, xr_scaled);
+    correlation_matrix(r, xr_scaled);
+  }
+
+  //y=0.0*y+1.0*betaHat^T*g => y = betaHat^T*g
+  matrix_mult(y, betaHat, g, 0.0, 1.0,'T','N');
+
+  //y=1.0*y+1.0*r*rhs where rhs=R^-1*(Y-G(XR)^T*betaHat), initial y=betaHat^T*g => y=betaHat^T*g+rhs^T*r
+  matrix_mult(y, rhs    , r, 1.0, 1.0,'T','N');
+
+  scaler.unScaleYOther(y);
+
+  //printf("y is correct for ValidateMain because it isn't being unscaled\n");
+
+  return y;
+}
+
+/// evaluate the gradient of the Kriging Model at a collection of points
+/// In:  xr  = numVarsr x npts matrix (one evaluation point per column)
+/// Out: d1y = numVarsr x npts matrix (also returned); d1y(ixr,ipt) is
+///      d y / d xr_ixr at point ipt, in UNSCALED units
+MtxDbl& KrigingModel::evaluate_d1y(MtxDbl& d1y, const MtxDbl& xr)
+{
+  int nptsxr=xr.getNCols();
+#ifdef __KRIG_ERR_CHECK__
+  assert((numVarsr == xr.getNRows())&&(0<nptsxr));
+#endif
+  d1y.newSize(numVarsr, nptsxr);
+  if(buildDerOrder==0) {
+    //you wouldn't want to do this for Gradient Enhanced Kriging
+    //i.e. if gradients of y were used as inputs
+    double singular_y;
+    if(scaler.isYSingular(0,singular_y)) {
+      //a constant response has zero derivative everywhere
+      d1y.zero();
+      return d1y;
+    }
+  }
+
+  /*
+  printf("evaluate_d1y()\n");
+  for(int ipt=0; ipt<numPoints; ++ipt) {
+    printf("XR(:,%3d)=[%12.6g",ipt,XR(0,ipt));
+    for(int ixr=1; ixr<numVarsr; ++ixr)
+      printf(", %12.6g",XR(ixr,ipt));
+    printf("]^T Y(%3d)=%12.6g\n",ipt,Y(0,ipt));
+  }
+  */
+
+  MtxDbl xr_scaled(xr);
+  //BUGFIX: was "if(~(scaler.isUnScaled()))"; bitwise NOT promotes the bool
+  //to int, so the result is never zero and the branch ran unconditionally.
+  //Logical NOT is intended: only scale when the model's data are scaled
+  //(matches the isUnScaled() handling in evaluate()).
+  if(!(scaler.isUnScaled())) {
+    //printf("scaling xr_scaled\n");
+    scaler.scaleXrOther(xr_scaled);
+  }
+
+  /*
+  printf("xr       =[%12.6g, %12.6g]\n",xr(0,0),xr(1,0));
+  printf("xr_scaled=[%12.6g, %12.6g]\n",xr_scaled(0,0),xr_scaled(1,0));
+  */
+
+  //der enumerates the numVarsr first-order derivative multi-indices
+  int nder=num_multi_dim_poly_coef(numVarsr,-1);
+  MtxInt der(numVarsr,nder);
+  multi_dim_poly_power(der,numVarsr,-1); //equivalent to der.identity();
+
+  //d1y = gradient of the trend contribution, betaHat^T*dg/dx
+  evaluate_poly_der(d1y,flyPoly,derivBetaHat,Poly,der,betaHat,xr_scaled);
+
+  MtxDbl r(numRowsR,nptsxr);
+  correlation_matrix(r, xr_scaled);
+  //apply_nugget_eval(r);
+  MtxDbl d1r(numRowsR,nptsxr);
+  MtxDbl temp_vec(1,nptsxr);
+
+  for(int ider=0; ider<nder; ++ider) {
+
+    //find the single dimension we are taking the first derivative of
+    int ixr;
+    for(ixr=0; ixr<numVarsr; ++ixr)
+      if(der(ixr,ider)>0)
+	break;
+    //printf("ixr=%d ",ixr);
+#ifdef __KRIG_ERR_CHECK__
+    assert(ixr==ider);
+#endif
+
+    //chain rule factor to undo the input/output scaling for this dimension
+    double d1y_unscale_factor=scaler.unScaleFactorDerY(ixr);
+    //printf("d1y_usf=%g\n",d1y_unscale_factor);
+
+    //add the GP correction's derivative, rhs^T*dr/dx_ixr, then unscale
+    dcorrelation_matrix_dxI(d1r, r, xr_scaled, ixr);
+    matrix_mult(temp_vec,rhs,d1r,0.0,1.0,'T');
+
+    for(int ipt=0; ipt<nptsxr; ++ipt)
+      d1y(ider,ipt)=(d1y(ider,ipt)+temp_vec(0,ipt))*d1y_unscale_factor;
+  }
+  /*
+  printf("d1y(:,0)=[%g",d1y(0,0));
+  for(int ider=1; ider<numVarsr; ++ider)
+    printf(", %g",d1y(ider,0));
+  printf("]\n");
+  */
+  return d1y;
+}
+
+/// evaluate the second derivatives (Hessian terms) of the Kriging Model at
+/// a collection of points
+/// In:  xr  = numVarsr x npts matrix (one evaluation point per column)
+/// Out: d2y = nder x npts matrix (also returned), one row per second-order
+///      multi-index enumerated by multi_dim_poly_power(der,numVarsr,-2),
+///      in UNSCALED units
+MtxDbl& KrigingModel::evaluate_d2y(MtxDbl& d2y, const MtxDbl& xr)
+{
+  int nptsxr=xr.getNCols();
+  int nder=num_multi_dim_poly_coef(numVarsr,-2);
+  d2y.newSize(nder,nptsxr);
+  if(buildDerOrder==0) {
+    double singular_y;
+    if(scaler.isYSingular(0,singular_y)) {
+      //you wouldn't want to do this for gradient based Kriging
+      //if gradients of y were used as inputs
+      //a constant response has zero second derivatives everywhere
+      d2y.zero();
+      return d2y;
+    }
+  }
+
+  MtxDbl xr_scaled(xr);
+  //BUGFIX: was "if(~(scaler.isUnScaled()))"; bitwise NOT promotes the bool
+  //to int and is never zero, so the branch ran unconditionally.  Logical
+  //NOT is intended (see evaluate_d1y).
+  if(!(scaler.isUnScaled()))
+    scaler.scaleXrOther(xr_scaled);
+  //assert(numVarsr == xr.getNCols());
+
+  MtxInt der(numVarsr,nder);
+  MtxInt thisder(numVarsr,1);
+  multi_dim_poly_power(der,numVarsr,-2);
+
+  //d2y = second derivatives of the trend contribution
+  evaluate_poly_der(d2y,flyPoly,derivBetaHat,Poly,der,betaHat,xr_scaled);
+
+  MtxDbl r(numRowsR,nptsxr);
+  //BUGFIX: was "correlation_matrix(r, xr)" which used the UNSCALED input;
+  //every other evaluation path (evaluate, evaluate_d1y, eval_variance)
+  //builds r from the scaled coordinates, so use xr_scaled here too.
+  correlation_matrix(r, xr_scaled);
+  //apply_nugget_eval(r);
+  MtxDbl d1r(numRowsR,nptsxr);
+  MtxDbl d2r(numRowsR,nptsxr);
+  MtxDbl temp_vec(1,nptsxr);
+
+  //BUGFIX: ixrold was previously declared (and reset to -1) INSIDE the
+  //loop, which defeated the caching of d1r across iterations; hoist it so
+  //d1r is only recomputed when the first differentiation dimension changes.
+  int ixrold=-1;
+  for(int ider=0; ider<nder; ++ider) {
+    int ixr, jxr;
+
+    der.getCols(thisder,ider);
+    //chain rule factor to undo the input/output scaling for this pair
+    double d2y_unscale_factor=scaler.unScaleFactorDerY(thisder);
+    //std::cout << "thisder=[" << thisder(0,0) << ", " << thisder(1,0)
+    //<< "]^T unscalefactor=" << d2y_unscale_factor << std::endl;
+
+    //find the first dimension we are taking a first derivative of
+    for(ixr=0; ixr<numVarsr; ++ixr)
+      if(der(ixr,ider)>0)
+	break;
+
+    if(ixr!=ixrold) {
+      ixrold=ixr;
+      dcorrelation_matrix_dxI(d1r, r, xr_scaled, ixr);
+    }
+
+    //find the second dimension we are taking a first derivative of
+    if(der(ixr,ider)==2)
+      jxr=ixr; //pure second derivative, d^2/dx_ixr^2
+    else
+      for(jxr=ixr+1; jxr<numVarsr; ++jxr)
+	if(der(jxr,ider)>0)
+	  break;
+#ifdef __KRIG_ERR_CHECK__
+    assert(jxr<numVarsr);
+#endif
+
+    //dcorrelation_matrix_dxI(d2r, d1r, xr_scaled, jvar);
+    d2correlation_matrix_dxIdxJ(d2r, d1r, r, xr_scaled, ixr, jxr);
+    //std::cout << "ider=" << ider << " size(d2r)=[" << d2r.getNRows()
+    //	      << ", " << d2r.getNCols() << "]" << std::endl;
+    //add the GP correction's second derivative, rhs^T*d2r, then unscale
+    matrix_mult(temp_vec,rhs,d2r,0.0,1.0,'T','N');
+
+    for(int ipt=0; ipt<nptsxr; ++ipt)
+      d2y(ider,ipt)=(d2y(ider,ipt)+temp_vec(0,ipt))*d2y_unscale_factor;
+  }
+
+  return d2y;
+}
+
+
+
+/** matrix Ops evaluation of adjusted variance at a single point
+    adj_var=unadjvar*
+            (1-r^T*R^-1*r+(g-G*R^-1*r)^T*(G*R^-1*G^T)^-1*(g-G*R^-1*r))
+    on a point by point basis
+    In:  xr = numVarsr x 1 column vector (one evaluation point)
+    Out: returns the adjusted (trend-aware) prediction variance in
+    UNSCALED units; clamped to be non-negative */
+double KrigingModel::eval_variance(const MtxDbl& xr)
+{
+#ifdef __KRIG_ERR_CHECK__
+  assert( (numVarsr==xr.getNRows()) && (xr.getNCols()==1) );
+#endif
+  MtxDbl g_minus_G_Rinv_r(nTrend,1), r(numRowsR,1);
+
+  double unscaled_unadj_var=estVarianceMLE;
+  if(scaler.isUnScaled()) {
+    eval_trend_fn(g_minus_G_Rinv_r, xr);
+    correlation_matrix(r, xr);
+  }
+  else{
+    //convert the MLE variance estimate back to the user's units
+    unscaled_unadj_var*=scaler.unScaleFactorVarY();
+    MtxDbl xr_scaled(xr);
+    scaler.scaleXrOther(xr_scaled);
+    eval_trend_fn(g_minus_G_Rinv_r, xr_scaled);
+    correlation_matrix(r, xr_scaled);
+  }
+  //at this point g_minus_G_Rinv_r holds g
+
+  MtxDbl Rinv_r(numRowsR,1);
+  MtxDbl G_Rinv_Gtran_inv_g_minus_G_Rinv_r(nTrend,1);
+
+
+  //Rinv_r = R^-1*r via the stored Cholesky factor of R
+  solve_after_Chol_fact(Rinv_r,RChol,r);
+
+  matrix_mult(g_minus_G_Rinv_r,Rinv_Gtran,r,1.0,-1.0,'T','N');
+  //at this point g_minus_G_Rinv_r holds g-G*R^-*r (i.e. the name is correct)
+
+  solve_after_Chol_fact(G_Rinv_Gtran_inv_g_minus_G_Rinv_r,
+			G_Rinv_Gtran_Chol,g_minus_G_Rinv_r);
+
+
+  double adj_var=unscaled_unadj_var*
+    (1.0-dot_product(Rinv_r,r)+
+     dot_product(G_Rinv_Gtran_inv_g_minus_G_Rinv_r,g_minus_G_Rinv_r));
+  //if(!(adj_var>0.0)) {
+  //printf("adj_var=%g unscaled_unadj_var=%g rcondR=%g\n",adj_var,unscaled_unadj_var,rcondR);
+  //fflush(stdout);
+  //}
+  adj_var=std::fabs(adj_var); //hack to handle "negative zero" variance (numerical precision round off error)
+  //NOTE(review): after the fabs() above adj_var<0.0 can no longer be true,
+  //so the branch below is unreachable; kept as defensive code
+  if(adj_var<0.0) {
+    printf("NKM setting adj_var to zero adj_var=%g unadj_var=%g rcondR=%g\n",adj_var,unscaled_unadj_var,rcondR);
+    adj_var=0.0;
+  }
+  else if(adj_var==0.0)
+    printf("NKM adj_var is zero =%g\n",adj_var);
+  else if(!(adj_var>=0.0))
+    //!(x>=0.0) with x already non-negative can only mean NaN
+    printf("double NKM_KrigingModel::eval_variance(...) adj_var=nan rcondR=%g\n",rcondR);
+
+  return adj_var;
+}
+
+/** Evaluate the adjusted variance for a collection of points using matrix
+    ops (i.e. BLAS and LAPACK) as much as possible)
+    adj_var=unadjvar*
+            (1-r^T*R^-1*r+(g-G*R^-1*r)^T*(G*R^-1*G^T)^-1*(g-G*R^-1*r))
+    on a point by point basis
+    In:  xr      = numVarsr x npts matrix (one evaluation point per column)
+    Out: adj_var = 1 x npts row vector of UNSCALED adjusted variances
+    (also returned) */
+MtxDbl& KrigingModel:: eval_variance(MtxDbl& adj_var, const MtxDbl& xr)
+{
+#ifdef __KRIG_ERR_CHECK__
+  assert(numVarsr==xr.getNRows());
+#endif
+  int nptsxr=xr.getNCols();
+  adj_var.newSize(1,nptsxr);
+  MtxDbl g_minus_G_Rinv_r(nTrend,nptsxr), r(numRowsR,nptsxr);
+
+  double unscaled_unadj_var=estVarianceMLE;
+  if(scaler.isUnScaled()) {
+    eval_trend_fn(g_minus_G_Rinv_r, xr);
+    correlation_matrix(r, xr);
+  }
+  else{
+    //convert the MLE variance estimate back to the user's units
+    unscaled_unadj_var*=scaler.unScaleFactorVarY();
+    MtxDbl xr_scaled(xr);
+    scaler.scaleXrOther(xr_scaled);
+    eval_trend_fn(g_minus_G_Rinv_r, xr_scaled);
+    correlation_matrix(r, xr_scaled);
+  }
+  //right now g_minus_G_Rinv_r actually holds g
+
+  MtxDbl Rinv_r(numRowsR,nptsxr);
+  MtxDbl G_Rinv_Gtran_inv_g_minus_G_Rinv_r(nTrend,nptsxr);
+
+  //Rinv_r = R^-1*r via the stored Cholesky factor of R
+  solve_after_Chol_fact(Rinv_r,RChol,r);
+  matrix_mult(g_minus_G_Rinv_r,Rinv_Gtran,r,1.0,-1.0,'T','N');
+  //g_minus_G_Rinv_r now holds g-G*R^-1*r (i.e. it's name is correct)
+
+  solve_after_Chol_fact(G_Rinv_Gtran_inv_g_minus_G_Rinv_r,
+			G_Rinv_Gtran_Chol,g_minus_G_Rinv_r);
+
+  for(int ipt=0; ipt<nptsxr; ++ipt) {
+    //saved 2*nptsxr loops
+    adj_var(0,ipt)=1.0-r(0,ipt)*Rinv_r(0,ipt)+
+      g_minus_G_Rinv_r(0,ipt)*G_Rinv_Gtran_inv_g_minus_G_Rinv_r(0,ipt);
+
+    //looks a lot like matrix mult but only N^2 ops... it's the diagonal
+    //of a matrix matrix multiply
+    for(int iR=1; iR<numRowsR; ++iR)
+      adj_var(0,ipt)-=r(iR,ipt)*Rinv_r(iR,ipt);
+
+    //looks a lot like matrix mult but only N^2 ops ... it's the diagonal
+    //of a matrix matrix multiply
+    for(int itrend=1; itrend<nTrend; ++itrend)
+      adj_var(0,ipt)+=g_minus_G_Rinv_r(itrend,ipt)*
+	G_Rinv_Gtran_inv_g_minus_G_Rinv_r(itrend,ipt);
+
+    adj_var(0,ipt)*=unscaled_unadj_var;
+
+    if(adj_var(0,ipt)<0.0)
+      adj_var(0,ipt)=std::fabs(adj_var(0,ipt)); //zero to within round off and the magnitude of the negative value will give us an idea of how big round off is
+    else if(!(adj_var(0,ipt)>=0.0))
+      //!(x>=0.0) with x not negative can only mean NaN
+      printf("MtxDbl& NKM_KrigingModel::eval_variance(...) adj_var(%d)=nan rcondR=%g\n",ipt,rcondR);
+  }
+
+  return adj_var;
+}
+
+/** set R=(R+nug*I), where the original R is the correlation matrix for the
+    data that the model is built from.  For GEK this generalizes to
+    R(i,i)=R(i,i)*(1+nug); Modifying the correlation matrix by the inclusion
+    of a nugget causes the KrigingModel to smooth the data, i.e. approximate
+    it rather than interpolate it (which is good if you know how big your
+    measurement noise is), it can also be used to fix an ill conditioned
+    correlation matrix.  The convention is that capital matrices are for
+    data the model is built from, lower case matrices are for arbitrary
+    points to evaluate the model at */
+void KrigingModel::apply_nugget_build() {
+  //no-op unless a strictly positive nugget was chosen/prescribed
+  if(!(nug>0.0)) return;
+  //printf("applying nugget=%22.16g\n",nug);
+
+  int nrowsR=R.getNRows();
+#ifdef __KRIG_ERR_CHECK__
+  assert(nrowsR==R.getNCols()); //R must be square
+#endif
+
+  //multiplicative diagonal update (see GEK generalization comment above)
+  double one_plus_nug=1.0+nug;
+  for(int i=0; i<nrowsR; ++i)
+    R(i,i)*=one_plus_nug;
+
+  return;
+}
+
+// convert from correlation lengths to theta (a.k.a. correlation parameters)
+// In:  corr_len = numVarsr x 1 vector of correlation lengths L
+// Out: theta    = numVarsr x 1 vector (also returned); the mapping depends
+//      on corrFunc: Gaussian theta=1/(2L^2), exponential theta=1/L,
+//      powered exponential theta=1/(p*L^p), Matern theta=sqrt(2*nu)/L
+MtxDbl& KrigingModel::get_theta_from_corr_len(MtxDbl& theta,
+					      const MtxDbl& corr_len) const{
+  theta.newSize(numVarsr,1);
+  if(corrFunc==GAUSSIAN_CORR_FUNC)
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      theta(ixr,0)=0.5/(corr_len(ixr,0)*corr_len(ixr,0));
+  else if(corrFunc==EXP_CORR_FUNC) {
+#ifdef __KRIG_ERR_CHECK__
+    //derivative-enhanced builds are only supported for some correlation
+    //functions; exponential requires buildDerOrder==0
+    assert(buildDerOrder==0);
+#endif
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      theta(ixr,0)=1.0/corr_len(ixr,0);
+  }
+  else if(corrFunc==POW_EXP_CORR_FUNC) {
+#ifdef __KRIG_ERR_CHECK__
+    assert(buildDerOrder==0);
+#endif
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      theta(ixr,0)=1.0/
+	(powExpCorrFuncPow*std::pow(corr_len(ixr,0),powExpCorrFuncPow));
+  }
+  else if(corrFunc==MATERN_CORR_FUNC)
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      theta(ixr,0)=std::sqrt(2.0*maternCorrFuncNu)/corr_len(ixr,0);
+  else{
+    std::cerr << "unknown corrFunc in get_theta_from_corr_len()\n";
+    assert(false);
+  }
+  return theta;
+}
+
+// convert from theta (a.k.a. correlation parameters) to correlation lengths
+// In:  theta    = numVarsr x 1 vector of correlation parameters
+// Out: corr_len = numVarsr x 1 vector (also returned); exact inverse of
+//      get_theta_from_corr_len() for each corrFunc family
+MtxDbl& KrigingModel::get_corr_len_from_theta(MtxDbl& corr_len,
+					      const MtxDbl& theta) const{
+  corr_len.newSize(numVarsr,1);
+  if(corrFunc==GAUSSIAN_CORR_FUNC)
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      corr_len(ixr,0)=std::sqrt(0.5/theta(ixr,0));
+  else if(corrFunc==EXP_CORR_FUNC) {
+#ifdef __KRIG_ERR_CHECK__
+    //derivative-enhanced builds are only supported for some correlation
+    //functions; exponential requires buildDerOrder==0
+    assert(buildDerOrder==0);
+#endif
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      corr_len(ixr,0)=1.0/theta(ixr,0);
+  }
+  else if(corrFunc==POW_EXP_CORR_FUNC) {
+#ifdef __KRIG_ERR_CHECK__
+    assert(buildDerOrder==0);
+#endif
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      corr_len(ixr,0)=
+	std::pow(powExpCorrFuncPow*theta(ixr,0),-1.0/powExpCorrFuncPow);
+  }
+  else if(corrFunc==MATERN_CORR_FUNC)
+    for(int ixr=0; ixr<numVarsr; ++ixr)
+      corr_len(ixr,0)=std::sqrt(2.0*maternCorrFuncNu)/theta(ixr,0);
+  else{
+    //BUGFIX: the error message previously named get_theta_from_corr_len()
+    //(a copy/paste slip); report the correct function
+    std::cerr << "unknown corrFunc in get_corr_len_from_theta()\n";
+    assert(false);
+  }
+  return corr_len;
+}
+
+
+
+
+/** the inline function
+    MtxDbl& KrigingModel::correlation_matrix(MtxDbl& r, const MtxDbl& xr) const
+    calls either
+    MtxDbl& KrigingModel::eval_kriging_correlation_matrix(MtxDbl& r, const MtxDbl& xr) const (i.e. this function)
+    OR
+    MtxDbl& KrigingModel::eval_gek_correlation_matrix(MtxDbl& r, const MtxDbl& xr) const
+
+    r (lower case r) is the Kriging correlation matrix between the
+    interpolation points and build data points, it used to EVALUATE but
+    not construct the emulator's Gaussian process error model
+    i.e. E(y(xr)|Y(XR))=betaHat^T*g(xr)+rhs^T*r  where
+    rhs=R^-1*(Y-G(XR)^T*betaHat)
+    choices for correlation function are gaussian, exponential,
+    powered exponential with 1<power<2, matern with nu=1.5 or 2.5
+    KRD wrote this */
+MtxDbl& KrigingModel::eval_kriging_correlation_matrix(MtxDbl& r, const MtxDbl& xr) const
+{
+  // In:  xr = numVarsr by nptsxr matrix of evaluation points (one point per
+  //      COLUMN; enforced by the assert below when __KRIG_ERR_CHECK__ is on)
+  // Out: r  = numRowsR by nptsxr matrix of correlations between the kept
+  //      (reordered) build points XRreorder and the columns of xr; also
+  //      returned by reference for call chaining
+  // The per-dimension correlation parameters ("thetas") are read from
+  // correlations(k,0).
+  if(buildDerOrder!=0) {
+    std::cerr << "You should only call eval_kriging_correlation_matrix when you want to evaluate regular Kriging (not GEK)\n";
+    assert(buildDerOrder==0);
+  }
+
+  int nptsxr=xr.getNCols(); //points at which we are evaluating the model
+#ifdef __KRIG_ERR_CHECK__
+  //  std::cerr<< "xr.getNRows()=" << xr.getNRows()
+  //	   << " numVarsr=" << numVarsr
+  //	   << " nptsxr=" << nptsxr << std::endl;
+
+  assert((xr.getNRows()==numVarsr)&&(0<nptsxr));
+#endif
+  r.newSize(numRowsR,nptsxr);
+  int i; //row index of the Kriging r matrix (also reordered XR point index)
+  int j; //column index of the Kriging r matrix (also xr point index)
+  int k; //dimension index
+  double deltax;
+
+  if(corrFunc==GAUSSIAN_CORR_FUNC) {
+    // ******************************************************************
+    // the Gaussian Correlation function
+    // ******************************************************************
+    if(numVarsr==1) {
+      //special case for when there is only 1 input variable
+      double theta=correlations(0,0);
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  deltax=xr(0,j)-XRreorder(0,i);
+	  r(i,j)=std::exp(-theta*deltax*deltax);
+	}
+    } else {
+      //general case there is more than 1 input variable
+      //even if nptsxr==1 outer looping once isn't a big performance hit
+      //so don't duplicate the code; smallest, i.e. k, loop is inside but
+      //that enables a single writing pass through the output array "r"
+      //(the last dimension's term is folded directly into the exp() call)
+      double sum_neg_theta_dx_squared;
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  deltax=xr(0,j)-XRreorder(0,i);
+	  sum_neg_theta_dx_squared=-correlations(0,0)* //=- is correct
+	    deltax*deltax;
+	  for(k=1; k<numVarsr-1; ++k) {
+	    deltax=xr(k,j)-XRreorder(k,i);
+	    sum_neg_theta_dx_squared-=correlations(k,0)* //-= is correct
+	      deltax*deltax;
+	  }
+	  k=numVarsr-1;
+	  deltax=xr(k,j)-XRreorder(k,i);
+	  r(i,j)=std::exp(sum_neg_theta_dx_squared
+			  -correlations(k,0)*deltax*deltax);
+
+	}
+    }
+  } else if(corrFunc==EXP_CORR_FUNC) {
+    // ******************************************************************
+    // the exponential correlation function
+    // ******************************************************************
+    if(numVarsr==1) {
+      //special case for when there is only 1 input variable
+      double theta=correlations(0,0);
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i)
+	  r(i,j)=std::exp(-theta*std::fabs(xr(0,j)-XRreorder(0,i)));
+    }
+    else {
+      //general case there is more than 1 input variable
+      //even if nptsxr==1 outer looping once isn't a big performance hit
+      //so don't duplicate the code; smallest, i.e. k, loop is inside but
+      //that enables a single writing pass through the output array "r"
+      double sum_neg_theta_abs_dx;
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  sum_neg_theta_abs_dx=-correlations(0,0)* //=- is correct
+	    std::fabs(xr(0,j)-XRreorder(0,i));
+	  for(k=1; k<numVarsr-1; ++k)
+	    sum_neg_theta_abs_dx-=correlations(k,0)* //-= is correct
+	      std::fabs(xr(k,j)-XRreorder(k,i));
+	  k=numVarsr-1;
+	  r(i,j)=std::exp(sum_neg_theta_abs_dx
+			  -correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,i)));
+	}
+    }
+  } else if(corrFunc==POW_EXP_CORR_FUNC) {
+    // ******************************************************************
+    // the powered exponential correlation function 1<powExpCorrFuncPow<2
+    // because exponention and Gaussian (a.k.a. squared exponential) were
+    // pulled out
+    // ******************************************************************
+    if(numVarsr==1) {
+      //special case for when there is only 1 input variable
+      double theta=correlations(0,0);
+      for(i=0; i<numPointsKeep; ++i)
+	for(j=0; j<nptsxr; ++j)
+	  r(i,j)=std::exp(-theta*std::pow(std::fabs(xr(0,j)-XRreorder(0,i)),
+					  powExpCorrFuncPow));
+    } else {
+      //general case there is more than 1 input variable
+      //even if nptsxr==1 outer looping once isn't a big performance hit
+      //so don't duplicate the code; smallest, i.e. k, loop is inside but
+      //that enables a single writing pass through the output array "r"
+      double sum_neg_theta_abs_dx_pow;
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  sum_neg_theta_abs_dx_pow=-correlations(0,0)* //=- is correct
+	    std::pow(std::fabs(xr(0,j)-XRreorder(0,i)),powExpCorrFuncPow);
+	  for(k=1; k<numVarsr-1; ++k)
+	    sum_neg_theta_abs_dx_pow-=correlations(k,0)* //-= is correct
+	      std::pow(std::fabs(xr(k,j)-XRreorder(k,i)),powExpCorrFuncPow);
+	  k=numVarsr-1;
+	  r(i,j)=std::exp(sum_neg_theta_abs_dx_pow-correlations(k,0)*
+			  std::pow(std::fabs(xr(k,j)-XRreorder(k,i)),
+				   powExpCorrFuncPow));
+	}
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==1.5)) {
+    // ******************************************************************
+    // the Matern 3/2 Correlation function
+    // r_k = (1+theta_k*|dx_k|)*exp(-theta_k*|dx_k|) per dimension
+    // ******************************************************************
+    double theta_abs_dx;
+    if(numVarsr==1) {
+      //special case for when there is only 1 input variable
+      double theta=correlations(0,0);
+      for(i=0; i<numPointsKeep; ++i)
+	for(j=0; j<nptsxr; ++j) {
+	  theta_abs_dx=theta*std::fabs(xr(0,j)-XRreorder(0,i));
+	  r(i,j)=(1.0+theta_abs_dx)*std::exp(-theta_abs_dx);
+	}
+    } else {
+      //general case there is more than 1 input variable
+      //even if nptsxr==1 outer looping once isn't a big performance hit
+      //so don't duplicate the code; smallest, i.e. k, loop is inside but
+      //that enables a single writing pass through the output array "r"
+      double sum_neg_theta_abs_dx;
+      double matern_coef_prod;
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  theta_abs_dx=correlations(0,0)*std::fabs(xr(0,j)-XRreorder(0,i));
+	  matern_coef_prod=1.0+theta_abs_dx;
+	  sum_neg_theta_abs_dx=-theta_abs_dx; //=- is correct
+	  for(k=1; k<numVarsr-1; ++k) {
+	    theta_abs_dx=correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,i));
+	    matern_coef_prod*=(1.0+theta_abs_dx);
+	    sum_neg_theta_abs_dx-=theta_abs_dx; //-= is correct
+	  }
+	  k=numVarsr-1;
+	  theta_abs_dx=correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,i));
+	  r(i,j)=matern_coef_prod*(1.0+theta_abs_dx)*
+	    std::exp(sum_neg_theta_abs_dx-theta_abs_dx);
+	}
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==2.5)) {
+    // ******************************************************************
+    // the Matern 5/2 Correlation function
+    // r_k = (1+t+t^2/3)*exp(-t) with t=theta_k*|dx_k| per dimension
+    // ******************************************************************
+    double theta_abs_dx;
+    const double one_third=1.0/3.0;
+    if(numVarsr==1) {
+      //special case for when there is only 1 input variable
+      double theta=correlations(0,0);
+      for(i=0; i<numPointsKeep; ++i)
+	for(j=0; j<nptsxr; ++j) {
+	  theta_abs_dx=theta*std::fabs(xr(0,j)-XRreorder(0,i));
+	  r(i,j)=(1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third)*
+	    std::exp(-theta_abs_dx);
+	}
+    } else {
+      //general case there is more than 1 input variable
+      //even if nptsxr==1 outer looping once isn't a big performance hit
+      //so don't duplicate the code; smallest, i.e. k, loop is inside but
+      //that enables a single writing pass through the output array "r"
+      double sum_neg_theta_abs_dx;
+      double matern_coef_prod;
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  theta_abs_dx=correlations(0,0)*std::fabs(xr(0,j)-XRreorder(0,i));
+	  matern_coef_prod=1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third;
+	  sum_neg_theta_abs_dx=-theta_abs_dx; //=- is correct
+	  for(k=1; k<numVarsr-1; ++k) {
+	    theta_abs_dx=correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,i));
+	    matern_coef_prod*=
+	      (1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third);
+	    sum_neg_theta_abs_dx-=theta_abs_dx; //-= is correct
+	  }
+	  k=numVarsr-1;
+	  theta_abs_dx=correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,i));
+	  r(i,j)=matern_coef_prod*
+	    (1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third)*
+	    std::exp(sum_neg_theta_abs_dx-theta_abs_dx);
+	}
+    }
+  } else{
+      std::cerr << "unknown corrFunc in MtxDbl& eval_kriging_correlation_matrix(MtxDbl& r, const MtxDbl& xr) const\n";
+      assert(false);
+  }
+
+  return r;
+}
+
+
+/** the inline function
+    MtxDbl& KrigingModel::correlation_matrix(MtxDbl& r, const MtxDbl& xr) const
+    calls either
+    MtxDbl& KrigingModel::eval_kriging_correlation_matrix(MtxDbl& r, const MtxDbl& xr) const
+    OR
+    MtxDbl& KrigingModel::eval_gek_correlation_matrix(MtxDbl& r, const MtxDbl& xr) const (i.e. this function)
+
+    r (lower case r) is the GEK correlation matrix between the
+    interpolation points and build data points; it is used to EVALUATE but
+    not construct the emulator's Gaussian process error model
+    i.e. E(y(xr)|Y(XR))=g(xr)^T*betaHat+r^T*R^-1*eps where
+    eps=(Y-G(XR)^T*betaHat)
+    choices for correlation function are gaussian, and matern with
+    nu=1.5 or 2.5 KRD wrote this */
+MtxDbl& KrigingModel::eval_gek_correlation_matrix(MtxDbl& r, const MtxDbl& xr) const
+{
+  // In:  xr = numVarsr by nptsxr matrix of evaluation points (one point per
+  //      COLUMN; enforced by the assert below when __KRIG_ERR_CHECK__ is on)
+  // Out: r  = numRowsR by nptsxr GEK correlation matrix: for each kept build
+  //      point a function-value row followed by derivative rows (derivatives
+  //      taken with respect to XR, see the note below); also returned by
+  //      reference for call chaining
+  if(buildDerOrder!=1) {
+    std::cerr << "You should only call eval_gek_correlation_matrix when you want to evaluate Gradient Enhanced Kriging\n";
+    assert(buildDerOrder==1);
+  }
+
+  int nptsxr=xr.getNCols(); //points at which we are evaluating the model
+#ifdef __KRIG_ERR_CHECK__
+  //BUG FIX: xr holds one point per column, so it is the number of ROWS that
+  //must equal numVarsr; this previously checked xr.getNCols()==numVarsr,
+  //which would spuriously fire whenever nptsxr!=numVarsr (compare the same
+  //check in eval_kriging_correlation_matrix and in the dcorrelation
+  //functions, which all use getNRows())
+  assert((xr.getNRows()==numVarsr)&&(0<nptsxr));
+#endif
+
+  r.newSize(numRowsR,nptsxr);
+  int i; //row index of the GEK r matrix 0<=i<numRowsR
+  int j; //column index of the GEK r matrix 0<=j<nptsxr also the xr point index
+  int k; //dimension index 0<=k<numVarsr (num real variables)
+  int ipt; //XRreorder point index
+  double deltax;  //xr(k,j)-XRreorder(k,ipt)
+  double krig_r;
+
+  //note to future developers on a point that may be confusing otherwise (you
+  //might otherwise mistake this for a bug) all of the derivatives in this
+  //file are with respect to XR not xr, the matern_1pt5_d1_mult_r and
+  //matern_2pt5_d1_mult_r functions in the .hpp file would be for derivatives
+  //with respect to xr (assuming deltax=xr-XR), the difference is here I've
+  //absorbed the negative into deltax going from -deltax (where deltax=XR-xr)
+  //to deltax=xr-XR;
+  int neqn_per_pt=numVarsr+1;
+
+  if(corrFunc==GAUSSIAN_CORR_FUNC) {
+    if(numVarsr==1) {
+      double theta=correlations(0,0); //save matrix access lookup
+      double two_theta = 2.0*theta;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  deltax=(xr(0,j)-XRreorder(0,ipt));
+	  krig_r=std::exp(-theta*deltax*deltax);
+	  r(i  ,j)=krig_r;
+	  r(i+1,j)=two_theta*deltax*krig_r; //this is a first
+	  //derivative with respect to XR not xr
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //since there's part of another point left and we know that
+	  //there is only one derivative it means that we're missing that
+	  //derivative and only have the function value
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep)&&
+		 (i==numRowsR-1));
+#endif
+	  deltax=(xr(0,j)-XRreorder(0,ipt));
+	  r(i,j)=std::exp(-theta*deltax*deltax);
+	}
+      }
+    } else{ //there is more than 1 dimension and more than one
+      //evaluation point
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(0,j)-XRreorder(0,ipt);
+	  r(i+1,j)=correlations(0,0)*deltax; //dr_dXR/(2*r)
+	  krig_r=-correlations(0,0)*deltax*deltax; //=- is correct
+	  for(k=1; k<numVarsr-1; ++k) {
+	    deltax=xr(k,j)-XRreorder(k,ipt);
+	    r(i+1+k,j)=correlations(k,0)*deltax; //dr_dXR/(2*r)
+	    krig_r-=correlations(k,0)*deltax*deltax; //-= is correct
+	  }
+	  k=numVarsr-1;
+	  deltax=xr(k,j)-XRreorder(k,ipt);
+	  krig_r=std::exp(krig_r-correlations(k,0)*deltax*deltax);
+	  r(i,j)=krig_r; //r(XR(i,:),xr(j,:)) (the correlation function)
+	  krig_r*=2.0; //now it's 2*kriging's correlation function to save
+	  //some ops
+	  //dr_dXR_k=2*theta(k)*(xr(k,j)-XRreorder(k,ipt))*r(xr(k,j),XRreorder(k,ipt))
+	  r(i+1+k,j)=correlations(k,0)*deltax*krig_r; //dr_dXR
+	  for(k=0; k<numVarsr-1; ++k)
+	    r(i+1+k,j)*=krig_r; //dr_dXR
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //the last XR point isn't a "whole point" we dropped some derivatives
+	  //out of its gradient to meet the bound on rcond, numExtraDerKeep
+	  //is the number of derivatives kept for the last point.  The
+	  //derivatives of the last point have NOT been reordered, they appear
+	  //in the same order as the input variables
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  //printf("deltax=xr(0,%d)-XRreorder(0,%d);\n",j,ipt);
+	  deltax=xr(0,j)-XRreorder(0,ipt);
+	  krig_r=-correlations(0,0)*deltax*deltax; //=- is correct
+	  for(k=1; k<numVarsr-1; ++k) {
+	    deltax=xr(k,j)-XRreorder(k,ipt);
+	    krig_r-=correlations(k,0)*deltax*deltax; //-= is correct
+	  }
+	  k=numVarsr-1;
+	  deltax=xr(k,j)-XRreorder(k,ipt);
+	  krig_r=std::exp(krig_r-correlations(k,0)*deltax*deltax);
+	  r(i,j)=krig_r; //r(XR(i,:),xr) (the correlation function)
+	  krig_r*=2.0; //now it's 2*kriging's correlation function to save
+	  //some ops
+	  //dr_dXR_k=2*theta(k)*(xr(k,j)-XRreorder(k,ipt))*r(xr(k,j),XRreorder(k,ipt))
+	  for(k=0; k<numExtraDerKeep; ++k)
+	    r(i+1+k,j)=correlations(k,0)*(xr(k,j)-XRreorder(k,ipt))*krig_r; //dr_dXR
+	}
+      }
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==1.5)) {
+    //this starts the section for the Matern 3/2 correlation function
+
+    double theta_abs_dx;
+    if(numVarsr==1) {
+      double theta=correlations(0,0); //save array access lookup
+      double theta_squared= theta*theta;
+      double exp_neg_theta_abs_dx;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  deltax=(xr(0,j)-XRreorder(0,ipt));
+	  theta_abs_dx=theta*std::fabs(deltax);
+	  exp_neg_theta_abs_dx=std::exp(-theta_abs_dx);
+	  r(i  ,j)=(1.0+theta_abs_dx)*exp_neg_theta_abs_dx; //1D correlation
+	  //function
+	  r(i+1,j)=theta_squared*deltax*exp_neg_theta_abs_dx; //this is a first
+	  //derivative with respect to XR not xr
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //since there's part of another point left and we know that
+	  //there is only one derivative it means that we're missing that
+	  //derivative and only have the function value
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep)&&
+		 (i==numRowsR-1));
+#endif
+	  theta_abs_dx=theta*std::fabs(xr(0,j)-XRreorder(0,ipt));
+	  r(i,j)=(1.0+theta_abs_dx)*std::exp(-theta_abs_dx); //1D correlation
+	  //function
+	}
+      }
+    }
+    else{ //there is more than 1 dimension
+      double matern_coef, matern_coef_prod, sum_neg_theta_abs_dx;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(0,j)-XRreorder(0,ipt);
+	  theta_abs_dx=correlations(0,0)*std::fabs(deltax);
+	  matern_coef=1.0+theta_abs_dx;
+	  matern_coef_prod=matern_coef;
+	  r(i+1,j)= //dr_dXR/r
+	    correlations(0,0)*correlations(0,0)*deltax/matern_coef;
+	  sum_neg_theta_abs_dx=-theta_abs_dx; //=- is correct
+	  for(k=1; k<numVarsr-1; ++k) {
+	    deltax=xr(k,j)-XRreorder(k,ipt);
+	    theta_abs_dx=correlations(k,0)*std::fabs(deltax);
+	    matern_coef=1.0+theta_abs_dx;
+	    matern_coef_prod*=matern_coef;
+	    r(i+1+k,j)= //dr_dXR/r
+	      correlations(k,0)*correlations(k,0)*deltax/matern_coef;
+	    sum_neg_theta_abs_dx-=theta_abs_dx; //-= is correct
+	  }
+	  k=numVarsr-1;
+	  deltax=xr(k,j)-XRreorder(k,ipt);
+	  theta_abs_dx=correlations(k,0)*std::fabs(deltax);
+	  matern_coef=1.0+theta_abs_dx;
+	  krig_r=matern_coef_prod*matern_coef*
+	    std::exp(sum_neg_theta_abs_dx-theta_abs_dx);
+	  r(i,j)=krig_r; //r(XR(i,:),xr) (the correlation function)
+	  r(i+1+k,j)= //dr_dXR
+	    correlations(k,0)*correlations(k,0)*deltax/matern_coef*krig_r;
+	  for(k=0; k<numVarsr-1; ++k)
+	    r(i+1+k,j)*=krig_r; //dr_dXR
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //the last XR point isn't a "whole point" we dropped some derivatives
+	  //out of its gradient to meet the bound on rcond, numExtraDerKeep
+	  //is the number of derivatives kept for the last point.  The
+	  //derivatives of the last point have NOT been reordered, they appear
+	  //in the same order as the input variables
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  theta_abs_dx=correlations(0,0)*std::fabs(xr(0,j)-XRreorder(0,ipt));
+	  matern_coef_prod=1.0+theta_abs_dx;
+	  sum_neg_theta_abs_dx=-theta_abs_dx; //=- is correct
+	  for(k=1; k<numVarsr-1; ++k) {
+	    theta_abs_dx=correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,ipt));
+	    matern_coef_prod*=(1.0+theta_abs_dx);
+	    sum_neg_theta_abs_dx-=theta_abs_dx; //-= is correct
+	  }
+	  k=numVarsr-1;
+	  theta_abs_dx=correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,ipt));
+	  krig_r=matern_coef_prod*(1.0+theta_abs_dx)*
+	    std::exp(sum_neg_theta_abs_dx-theta_abs_dx);
+	  r(i,j)=krig_r; //r(XR(i,:),xr) (the correlation function)
+	  for(k=0; k<numExtraDerKeep; ++k) {
+	    deltax=xr(k,j)-XRreorder(k,ipt);
+	    r(i+1+k,j)=krig_r * //r(i+1+k,j)=dr_dXR
+	      correlations(k,0)*correlations(k,0)*deltax/
+	      (1.0+correlations(k,0)*std::fabs(deltax));
+	  }
+	}
+      }
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==2.5)) {
+    //this starts the section for the Matern 5/2 correlation function
+
+    const double one_third=1.0/3.0;
+    double theta_abs_dx;
+    if(numVarsr==1) {
+      double theta=correlations(0,0); //save array access lookup
+      double theta_squared= theta*theta;
+      double exp_neg_theta_abs_dx;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  deltax=(xr(0,j)-XRreorder(0,ipt));
+	  theta_abs_dx=theta*std::fabs(deltax);
+	  exp_neg_theta_abs_dx=std::exp(-theta_abs_dx);
+	  r(i  ,j)=(1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third)*
+	    exp_neg_theta_abs_dx; //1D correlation function
+	  r(i+1,j)=theta_squared*deltax*(1.0+theta_abs_dx)*one_third*
+	    exp_neg_theta_abs_dx; //this is a first derivative with respect
+	  //to XR not xr
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //since there's part of another point left and we know that
+	  //there is only one derivative it means that we're missing that
+	  //derivative and only have the function value
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep)&&
+		 (i==numRowsR-1));
+#endif
+	  theta_abs_dx=theta*std::fabs(xr(0,j)-XRreorder(0,ipt));
+	  r(i  ,j)=(1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third)*
+	    std::exp(-theta_abs_dx); //1D correlation function
+	}
+      }
+    }
+    else{ //there is more than 1 dimension
+      double matern_coef, matern_coef_prod, sum_neg_theta_abs_dx;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(0,j)-XRreorder(0,ipt);
+	  theta_abs_dx=correlations(0,0)*std::fabs(deltax);
+	  matern_coef=1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third;
+	  matern_coef_prod=matern_coef;
+	  r(i+1,j)= //dr_dXR/r
+	    correlations(0,0)*correlations(0,0)*deltax*(1.0+theta_abs_dx)*
+	    one_third/matern_coef;
+	  sum_neg_theta_abs_dx=-theta_abs_dx; //=- is correct
+	  for(k=1; k<numVarsr-1; ++k) {
+	    deltax=xr(k,j)-XRreorder(k,ipt);
+	    theta_abs_dx=correlations(k,0)*std::fabs(deltax);
+	    matern_coef=1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third;
+	    matern_coef_prod*=matern_coef;
+	    r(i+1+k,j)= //dr_dXR/r
+	      correlations(k,0)*correlations(k,0)*deltax*(1.0+theta_abs_dx)*
+	      one_third/matern_coef;
+	    sum_neg_theta_abs_dx-=theta_abs_dx; //-= is correct
+	  }
+	  k=numVarsr-1;
+	  deltax=xr(k,j)-XRreorder(k,ipt);
+	  theta_abs_dx=correlations(k,0)*std::fabs(deltax);
+	  matern_coef=1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third;
+	  krig_r=matern_coef_prod*matern_coef*
+	    std::exp(sum_neg_theta_abs_dx-theta_abs_dx);
+	  r(i,j)=krig_r; //r(XR(i,:),xr) (the correlation function)
+	  r(i+1+k,j)= //dr_dXR
+	    correlations(k,0)*correlations(k,0)*deltax*(1.0+theta_abs_dx)*
+	    one_third/matern_coef*krig_r;
+	  for(k=0; k<numVarsr-1; ++k)
+	    r(i+1+k,j)*=krig_r; //dr_dXR
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //the last XR point isn't a "whole point" we dropped some derivatives
+	  //out of its gradient to meet the bound on rcond, numExtraDerKeep
+	  //is the number of derivatives kept for the last point.  The
+	  //derivatives of the last point have NOT been reordered, they appear
+	  //in the same order as the input variables
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  theta_abs_dx=correlations(0,0)*std::fabs(xr(0,j)-XRreorder(0,ipt));
+	  matern_coef_prod=1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third;
+	  sum_neg_theta_abs_dx=-theta_abs_dx; //=- is correct
+	  for(k=1; k<numVarsr-1; ++k) {
+	    theta_abs_dx=correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,i));
+	    matern_coef_prod*=
+	      (1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third);
+	    sum_neg_theta_abs_dx-=theta_abs_dx; //-= is correct
+	  }
+	  k=numVarsr-1;
+	  theta_abs_dx=correlations(k,0)*std::fabs(xr(k,j)-XRreorder(k,ipt));
+	  krig_r=matern_coef_prod*
+	    (1.0+theta_abs_dx+theta_abs_dx*theta_abs_dx*one_third)*
+	    std::exp(sum_neg_theta_abs_dx-theta_abs_dx);
+	  r(i,j)=krig_r; //r(XR(i,:),xr) (the correlation function)
+	  for(k=0; k<numExtraDerKeep; ++k) {
+	    deltax=xr(k,j)-XRreorder(k,ipt);
+	    theta_abs_dx=correlations(k,0)*std::fabs(deltax);
+	    r(i+1+k,j)=krig_r * //r(i+1+k,j)=dr_dXR
+	      correlations(k,0)*correlations(k,0)*deltax*(1.0+theta_abs_dx)/
+	      (3.0*(1.0+theta_abs_dx)+theta_abs_dx*theta_abs_dx);
+
+	  }
+	}
+      }
+    }
+  } else{
+    std::cerr << "Unknown or Invalid Correlation function for Gradient Enhanced Kriging in MtxDbl& KrigingModel::eval_gek_correlation_matrix(MtxDbl& r, const MtxDbl& xr) const\n";
+    assert(false);
+  }
+
+
+  return r;
+}
+
+
+///Ider is the variable/dimension not the point
+MtxDbl& KrigingModel::eval_kriging_dcorrelation_matrix_dxI(MtxDbl& dr, const MtxDbl& r, const MtxDbl& xr, int Ider) const
+{
+  // In:  r   = numRowsR by nptsxr Kriging correlation matrix previously
+  //       computed for this same xr (sizes checked by the assert below)
+  //      xr  = numVarsr by nptsxr matrix of evaluation points (one per COLUMN)
+  //      Ider= index of the input variable to differentiate with respect to
+  // Out: dr  = numRowsR by nptsxr matrix of first derivatives of r with
+  //       respect to xr(Ider,:); also returned by reference for call chaining
+  // Every branch computes dr(i,j) as r(i,j) times a one-dimensional
+  // multiplier that depends only on xr(Ider,j)-XRreorder(Ider,i) and the
+  // Ider-th correlation parameter.
+  if(buildDerOrder!=0) {
+    std::cerr << "You should only call eval_kriging_dcorrelation_matrix_dxI when you want to evaluate regular Kriging's (not GEK's) first derivative.\n";
+    assert(buildDerOrder==0);
+  }
+  int nptsxr=xr.getNCols();
+#ifdef __KRIG_ERR_CHECK__
+  assert((r.getNCols()==nptsxr)&&(r.getNRows()==numRowsR)&&
+	 (xr.getNRows()==numVarsr)&&(0<=Ider)&&(Ider<numVarsr));
+#endif
+  dr.newSize(numRowsR,nptsxr);
+  int i; //row index of r & dr, also the point index of reordered XR
+  int j; //column index of r & dr, also the point index of xr
+
+  if(corrFunc==GAUSSIAN_CORR_FUNC) {
+    // *******************************************************************
+    // Gaussian Correlation Function
+    // GAUSSIAN_CORR_FUNC is infinitely differentiable
+    // *******************************************************************
+    double neg_two_theta=-2.0*correlations(Ider,0); //save matrix dereference
+    //for speed
+    for(j=0; j<nptsxr; ++j)
+      for(i=0; i<numPointsKeep; ++i)
+	dr(i,j)=r(i,j)*neg_two_theta*(xr(Ider,j)-XRreorder(Ider,i));
+  } else if(corrFunc==EXP_CORR_FUNC) {
+    // *******************************************************************
+    // Exponential Correlation Function
+    // 1D EXP_CORR_FUNC r(x1,x2) is differentiable except where x1==x2
+    // this is correct for x1!=x2
+    // *******************************************************************
+    double neg_theta=-correlations(Ider,0); //save matrix dereference for
+    //speed
+    //dsign: helper presumed to return the sign of its argument -- defined
+    //elsewhere in this class's headers, TODO confirm its value at exactly 0
+    for(j=0; j<nptsxr; ++j)
+      for(i=0; i<numPointsKeep; ++i)
+	dr(i,j)=r(i,j)*neg_theta*dsign(xr(Ider,j)-XRreorder(Ider,i));
+  } else if(corrFunc==POW_EXP_CORR_FUNC) {
+    // *******************************************************************
+    // Powered Exponential Correlation Function with 1<power<2
+    // 1D POW_EXP_CORR_FUNC r(x1,x2) is once differentiable everywhere (and
+    // twice+ differentiable where x1!=x2)
+    // *******************************************************************
+    double neg_theta_pow=-powExpCorrFuncPow*correlations(Ider,0); //save
+    //matrix dereference for speed
+    double pow_m_1=powExpCorrFuncPow-1.0; //for speed
+    double delta_x;
+    //note: these loop indices intentionally shadow the outer i and j
+    for(int j=0; j<nptsxr; ++j)
+      for(int i=0; i<numPointsKeep; ++i) {
+	delta_x=xr(Ider,j)-XRreorder(Ider,i);
+	dr(i,j)=r(i,j)*dsign(delta_x)*neg_theta_pow*
+	  std::pow(std::fabs(delta_x),pow_m_1);
+      }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==1.5)) {
+    // *******************************************************************
+    // Matern 3/2 Correlation Function
+    // 1D MATERN_CORR_FUNC 1.5 is once differentiable everywhere (and
+    // twice+ differentiable where x1!=x2, while not twice differentiable
+    // at x1==x2 the limit of the 2nd derivative is defined and is the
+    // same from both sides see Lockwood and Anitescu)
+    // *******************************************************************
+    double theta=correlations(Ider,0); //save matrix dereference for speed
+    //matern_1pt5_d1_mult_r: (d r/d x)/r helper from the .hpp
+    for(j=0; j<nptsxr; ++j)
+      for(i=0; i<numPointsKeep; ++i)
+	dr(i,j)=r(i,j)*
+	  matern_1pt5_d1_mult_r(theta,xr(Ider,j)-XRreorder(Ider,i));
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==2.5)) {
+    // *******************************************************************
+    // Matern 5/2 Correlation Function
+    // 1D MATERN_CORR_FUNC 2.5 is twice differentiable everywhere (and
+    // twice+ differentiable where x1!=x2)
+    // *******************************************************************
+    double theta=correlations(Ider,0); //save matrix dereference for speed
+    //matern_2pt5_d1_mult_r: (d r/d x)/r helper from the .hpp
+    for(j=0; j<nptsxr; ++j)
+      for(i=0; i<numPointsKeep; ++i)
+	dr(i,j)=r(i,j)*
+	  matern_2pt5_d1_mult_r(theta,xr(Ider,j)-XRreorder(Ider,i));
+  } else{
+    std::cerr << "unknown corrFunc in MtxDbl& KrigingModel::eval_kriging_dcorrelation_matrix_dxI(MtxDbl& dr, const MtxDbl& r, const MtxDbl& xr, int Ider) const\n";
+    assert(false);
+  }
+  return dr;
+}
+///Ider is the variable/dimension not the point
+MtxDbl& KrigingModel::eval_gek_dcorrelation_matrix_dxI(MtxDbl& dr, const MtxDbl& r, const MtxDbl& xr, int Ider) const
+{
+  if(buildDerOrder!=1) {
+    std::cerr << "You should only call eval_gek_dcorrelation_matrix_dxI when you want to evaluate Gradient Enhanced Kriging's first derivative\n";
+    assert(buildDerOrder==1);
+  }
+  int nptsxr=xr.getNCols();
+#ifdef __KRIG_ERR_CHECK__
+  assert((r.getNCols()==nptsxr)&&(r.getNRows()==numRowsR)&&
+	 (xr.getNRows()==numVarsr)&&(0<=Ider)&&(Ider<numVarsr));
+#endif
+  dr.newSize(numRowsR,nptsxr);
+  int neqn_per_pt=1+numVarsr;
+  int i; //row index of r & dr
+  int j; //column index of r & dr, also the point index of xr
+  int k; //dimension index
+  int ipt; //point index of reordered XR
+
+  if(corrFunc==GAUSSIAN_CORR_FUNC) {
+    // *******************************************************************
+    // Gaussian Correlation Function
+    // GAUSSIAN_CORR_FUNC is infinitely differentiable
+    // *******************************************************************
+    double two_theta=2.0*correlations(Ider,0); //save matrix dereference for speed
+    double neg_two_theta_dx;
+    if(numVarsr==1)
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  neg_two_theta_dx=two_theta*(XRreorder(Ider,ipt)-xr(Ider,j));
+	  dr(i  ,j)=r(i,j)*neg_two_theta_dx;
+	  dr(i+1,j)=r(i,j)*two_theta + r(i+1,j)*neg_two_theta_dx;
+	}
+	// since there is only one dimension if there is a partial point
+	// it will be a function value only, and actually recalculating it
+	// will likely be faster on average then checking if there's a
+	// partial point and calculating it if needed
+	ipt=numPointsKeep-1;
+	i=ipt*2;
+	dr(i  ,j)=r(i,j)*two_theta*(XRreorder(Ider,ipt)-xr(Ider,j));
+      }
+    else{
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  neg_two_theta_dx=two_theta*(XRreorder(Ider,ipt)-xr(Ider,j));
+	  dr(i,j)=r(i,j)*neg_two_theta_dx;
+	  for(k=0; k<numVarsr; ++k)
+	    dr(i+1+k,j)=r(i+1+k,j)*neg_two_theta_dx;
+	  dr(i+1+Ider,j)+=r(i,j)*two_theta;
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should be what we need them to be
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  neg_two_theta_dx=two_theta*(XRreorder(Ider,ipt)-xr(Ider,j));
+	  dr(i,j)=r(i,j)*neg_two_theta_dx;
+	  for(k=0; k<numExtraDerKeep; ++k)
+	    dr(i+1+k,j)=r(i+1+k,j)*neg_two_theta_dx;
+	  if(Ider<numExtraDerKeep)
+	    dr(i+1+Ider,j)+=r(i,j)*two_theta;
+	}
+      }
+    }
+  } else if(corrFunc==EXP_CORR_FUNC) {
+    std::cerr << "The exponential correlation function is not a valid correlation function for gradient enhanced Kriging\n";
+      assert(false);
+  } else if(corrFunc==POW_EXP_CORR_FUNC) {
+    std::cerr << "The powered exponential (with power < 2) correlation function is not a valid correlation function for gradient enhanced Kriging\n";
+      assert(false);
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==1.5)) {
+    // *******************************************************************
+    // Matern 3/2 Correlation Function
+    // 1D MATERN_CORR_FUNC 1.5 is once differentiable everywhere (and
+    // twice+ differentiable where x1!=x2, while not twice differentiable
+    // at x1==x2 the limit of the 2nd derivative is defined and is the
+    // same from both sides see Lockwood and Anitescu)
+    // *******************************************************************
+    double theta=correlations(Ider,0); //save matrix dereference for speed
+    double neg_theta_squared=-theta*theta;
+    double deltax;
+    double matern_coef;
+    if(numVarsr==1)
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  deltax=(xr(Ider,j)-XRreorder(Ider,ipt));
+	  matern_coef=1.0+theta*std::fabs(deltax);
+	  dr(i  ,j)=r(i,j)*neg_theta_squared*deltax/matern_coef;
+	  dr(i+1,j)=r(i,j)*neg_theta_squared*(1.0-2.0/matern_coef);
+	}
+	// since there is only one dimension if there is a partial point
+	// it will be a function value only, and actually recalculating it
+	// will likely be faster on average then checking if there's a
+	// partial point and calculating it if needed
+	ipt=numPointsKeep-1;
+	i=ipt*2;
+	deltax=(xr(Ider,j)-XRreorder(Ider,ipt));
+	dr(i,j)=r(i,j)*neg_theta_squared*deltax/(1.0+theta*std::fabs(deltax));
+      }
+    else{
+      double matern_d1_mult_r;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=(xr(Ider,j)-XRreorder(Ider,ipt));
+	  matern_coef=1.0+theta*std::fabs(deltax);
+	  matern_d1_mult_r=neg_theta_squared*deltax/matern_coef;
+	  dr(i  ,j)=r(i,j)*matern_d1_mult_r;
+	  for(k=0; k<numVarsr; ++k)
+	    dr(i+1+k,j)=r(i+1+k,j)*matern_d1_mult_r;
+	  dr(i+1+Ider,j)=r(i,j)*neg_theta_squared*(1.0-2.0/matern_coef);
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should be what we need them to be
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  deltax=(xr(Ider,j)-XRreorder(Ider,ipt));
+	  matern_coef=1.0+theta*std::fabs(deltax);
+	  matern_d1_mult_r=neg_theta_squared*deltax/matern_coef;
+	  dr(i  ,j)=r(i,j)*matern_d1_mult_r;
+	  for(k=0; k<numExtraDerKeep; ++k)
+	    dr(i+1+k,j)=r(i+1+k,j)*matern_d1_mult_r;
+	  if(Ider<numExtraDerKeep)
+	    dr(i+1+Ider,j)=r(i,j)*neg_theta_squared*(1.0-2.0/matern_coef);
+	}
+      }
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==2.5)) {
+    // *******************************************************************
+    // Matern 5/2 Correlation Function
+    // 1D MATERN_CORR_FUNC 2.5 is twice differentiable everywhere (and
+    // twice+ differentiable where x1!=x2)
+    // *******************************************************************
+    double theta=correlations(Ider,0); //save matrix dereference for speed
+    double theta_squared=theta*theta;
+    double theta_abs_dx;
+    double deltax;
+    if(numVarsr==1) {
+      double r_theta_squared_div_3_matern_coef;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  deltax=(xr(Ider,j)-XRreorder(Ider,ipt));
+	  theta_abs_dx=theta*std::fabs(deltax);
+	  r_theta_squared_div_3_matern_coef=r(i,j)*theta_squared/
+	    (3.0*(1.0+theta_abs_dx)+theta_abs_dx*theta_abs_dx);
+	  dr(i  ,j)=-r_theta_squared_div_3_matern_coef*
+	    deltax*(1.0+theta_abs_dx);
+	  dr(i+1,j)=r_theta_squared_div_3_matern_coef*
+	    (1.0+theta_abs_dx-theta_abs_dx*theta_abs_dx);
+	}
+	// since there is only one dimension if there is a partial point
+	// it will be a function value only, and actually recalculating it
+	// will likely be faster on average then checking if there's a
+	// partial point and calculating it if needed
+	ipt=numPointsKeep-1;
+	i=ipt*2;
+	deltax=(xr(Ider,j)-XRreorder(Ider,ipt));
+	theta_abs_dx=theta*std::fabs(deltax);
+	dr(i,j)=-r(i,j)*theta_squared/
+	  (3.0*(1.0+theta_abs_dx)+theta_abs_dx*theta_abs_dx)*
+	  deltax*(1.0+theta_abs_dx);
+      }
+    } else{
+      double theta_squared_div_3_matern_coef;
+      double matern_d1_mult_r;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(Ider,j)-XRreorder(Ider,ipt);
+	  theta_abs_dx=theta*std::fabs(deltax);
+	  theta_squared_div_3_matern_coef=theta_squared/
+	    (3.0*(1.0+theta_abs_dx)+theta_abs_dx*theta_abs_dx);
+	  matern_d1_mult_r=-theta_squared_div_3_matern_coef*
+	    deltax*(1.0+theta_abs_dx);
+	  dr(i  ,j)=r(i,j)*matern_d1_mult_r;
+	  for(k=0; k<numVarsr; ++k)
+	    dr(i+1+k,j)=r(i+1+k,j)*matern_d1_mult_r;
+	  dr(i+1+Ider,j)=r(i,j)*theta_squared_div_3_matern_coef*
+	    (1.0+theta_abs_dx-theta_abs_dx*theta_abs_dx);
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should be what we need them to be
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  deltax=xr(Ider,j)-XRreorder(Ider,ipt);
+	  theta_abs_dx=theta*std::fabs(deltax);
+	  theta_squared_div_3_matern_coef=theta_squared/
+	    (3.0*(1.0+theta_abs_dx)+theta_abs_dx*theta_abs_dx);
+	  matern_d1_mult_r=-theta_squared_div_3_matern_coef*
+	    deltax*(1.0+theta_abs_dx);
+	  dr(i  ,j)=r(i,j)*matern_d1_mult_r;
+	  for(k=0; k<numExtraDerKeep; ++k)
+	    dr(i+1+k,j)=r(i+1+k,j)*matern_d1_mult_r;
+	  if(Ider<numExtraDerKeep)
+	    dr(i+1+Ider,j)=r(i,j)*theta_squared_div_3_matern_coef*
+	      (1.0+theta_abs_dx-theta_abs_dx*theta_abs_dx);
+	}
+      }
+    }
+  } else{
+    std::cerr << "unknown corrFunc in MtxDbl& KrigingModel::eval_gek_dcorrelation_matrix_dxI(MtxDbl& dr, const MtxDbl& r, const MtxDbl& xr, int Ider) const\n";
+    assert(false);
+  }
+
+  return dr;
+}
+
+
+
+/** Evaluate d2r(i,j) = d^2 r(i,j) / ( dxr(Ider,j) dxr(Jder,j) ), the 2nd
+    derivative of the (regular, not gradient enhanced) Kriging correlation
+    matrix with respect to evaluation point dimensions Ider and Jder.  It
+    reuses the previously computed correlation matrix r and its 1st
+    derivative drI = dr/dxr(Ider) for speed.
+    In:  r   is numPointsKeep by nptsxr
+         drI is numPointsKeep by nptsxr (= dr/dxr(Ider))
+         xr  is numVarsr by nptsxr (the points being evaluated)
+         Ider, Jder are the derivative dimensions
+    Out: d2r is numPointsKeep by nptsxr (also returned by reference)
+    NOTE(review): only Jder's bounds are asserted below; Ider's are not --
+    confirm all callers pass 0<=Ider<numVarsr */
+MtxDbl& KrigingModel::eval_kriging_d2correlation_matrix_dxIdxJ(MtxDbl& d2r, const MtxDbl& drI, const MtxDbl& r, const MtxDbl& xr, int Ider, int Jder) const
+{
+  if(buildDerOrder!=0) {
+    // NOTE(review): this message names eval_kriging_correlation_matrix but
+    // we are in eval_kriging_d2correlation_matrix_dxIdxJ; presumably the
+    // message text should be updated to match -- confirm before changing
+    std::cerr << "You should only call eval_kriging_correlation_matrix when you want to evaluate regular Kriging (not GEK)\n";
+    assert(buildDerOrder==0);
+  }
+
+  int nptsxr=xr.getNCols(); //points at which we are evaluating the model
+  d2r.newSize(numPointsKeep,nptsxr);
+
+#ifdef __KRIG_ERR_CHECK__
+  assert((r.getNCols()==nptsxr)&&(r.getNRows()==numPointsKeep)&&
+	 (xr.getNRows()==numVarsr)&&(0<=Jder)&&(Jder<numVarsr));
+#endif
+
+  int i; //row index of r, d1r, & d2r; also the point index of reordered XR
+  int j; //column index of r, d1r, & d2r; also the point index of xr
+
+  if(corrFunc==GAUSSIAN_CORR_FUNC) {
+    // *********************************************************************
+    // The GAUSSIAN CORRELATION FUNCTION
+    // is infinitely differentiable, i.e. is C^infinity continuous
+    // *********************************************************************
+    double neg_two_theta_J=-2.0*correlations(Jder,0);
+    if(Ider==Jder) {
+      // taking the 2nd derivative of the 1D correlation function
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i)
+	  d2r(i,j)=neg_two_theta_J*
+	    ((xr(Jder,j)-XRreorder(Jder,i))*drI(i,j)+r(i,j));
+    } else {
+      // taking the product of the 1st derivative of 2 independent 1D
+      // correlation functions
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i)
+	  d2r(i,j)=neg_two_theta_J*
+	    (xr(Jder,j)-XRreorder(Jder,i))*drI(i,j);
+    }
+  } else if(corrFunc==EXP_CORR_FUNC) {
+    // *********************************************************************
+    // The EXPONENTIAL CORRELATION FUNCTION
+    // the first derivative WRT theta(J) is
+    //     drJ=-theta(J)*sign(xr(J)-XR(J))*r
+    // if away from xr(J)==XR(J) then d(sign(xr(J)-XR(J))/dxr(J)=0
+    // if at xr(J)==XR(J) then derivative of sign (a.k.a step function) is
+    // two times the delta function (or a rectangle with area 2, whose base
+    // width is a point, i.e. zero, meaning the delta function is infinite).
+    // The following is correct as long as xr(J)=/=XR(J), i.e. as long as
+    // the evaluation point doesn't share a coordinate with any build point.
+    // *********************************************************************
+    double neg_theta_J=-correlations(Jder,0);
+    for(j=0; j<nptsxr; ++j)
+      for(i=0; i<numPointsKeep; ++i)
+	d2r(i,j)=neg_theta_J*dsign(xr(Jder,j)-XRreorder(Jder,i))*drI(i,j);
+  } else if(corrFunc==POW_EXP_CORR_FUNC) {
+    // *********************************************************************
+    // The POWERED EXPONENTIAL CORRELATION FUNCTION with 1<power<2
+    //
+    // the 1st derivative with respect to xr of the 1D correlation function
+    // is defined
+    //
+    // The 2nd derivative with respect to xr of the 1D correlation function
+    // *is undefined at xr==XR,
+    // *approaches negative infinity as xr approaches XR from below, and
+    // *approaches positive infinity as xr approaches XR from above
+    // when xr==XR we use the average of the second derivative from above and
+    // the second derivative from below, that average is exactly zero
+    // *********************************************************************
+    double neg_thetaJ_pow=-correlations(Jder,0)*powExpCorrFuncPow;
+    double pow_minus_1=powExpCorrFuncPow-1.0;
+    double abs_dx;
+    double deltax;
+    if(Ider==Jder) {
+      // taking the 2nd derivative of the 1D correlation function
+      double pow_minus_2=powExpCorrFuncPow-2.0;
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,i);
+	  if(deltax==0) {
+	    // undefined point: use 0.0, the average of the +inf and -inf
+	    // one-sided limits (see banner comment above)
+	    d2r(i,j)=0.0;
+	    std::cerr << "the 2nd derivative of the powered exponential correlation function (with 1<power<2) is undefined when a coordinate of the evaluation point equals the coordinate of a build point, using the zero as the average of + infinity (from above) and - infinity (from below)\n";
+	  }
+	  else{
+	    abs_dx=std::fabs(deltax);
+	    d2r(i,j)=neg_thetaJ_pow*dsign(deltax)*
+	      (pow_minus_1*std::pow(abs_dx,pow_minus_2)*r(i,j)+
+	       std::pow(abs_dx,pow_minus_1)*drI(i,j));
+	  }
+	}
+    } else {
+      //we are taking the product of the first derivatives of 2 independent
+      //1D correlation functions so we don't have to worry about the 2nd
+      //derivative of a 1D correlation function being undefined
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,i);
+	  d2r(i,j)=neg_thetaJ_pow*dsign(deltax)*
+	    std::pow(std::fabs(deltax),pow_minus_1)*drI(i,j);
+	}
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==1.5)) {
+    // *********************************************************************
+    // The MATERN 3/2 CORRELATION FUNCTION
+    //
+    // the 1st derivative with respect to xr of the 1D correlation function
+    // is defined
+    //
+    // The 2nd derivative with respect to xr of the 1D correlation function
+    // *is undefined at xr==XR,
+    // *is -theta^2*(1-theta*|xr-XR|)*exp(-theta*|xr-XR|) at xr=/=XR
+    // *approaches -theta^2 from above and below
+    // when xr==XR we use the limit, -theta^2
+    // this follows the approach of
+    //   Lockwood, Brian A. and Anitescu, Mihai, "Gradient-Enhanced
+    //      Universal Kriging for Uncertainty Propagation"
+    //      Preprint ANL/MCS-P1808-1110
+    // *********************************************************************
+    double thetaJ=correlations(Jder,0);
+    double neg_thetaJ_squared=-thetaJ*thetaJ;
+    double deltax;
+    if(Ider==Jder) {
+      // taking the 2nd derivative of the 1D correlation function
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,i);
+	  d2r(i,j)=neg_thetaJ_squared*
+	    (2.0/(1.0+thetaJ*std::fabs(deltax))-1.0)*r(i,j);
+	}
+    }else{
+      // taking the product of the 1st derivative of 2 independent 1D
+      // correlation functions
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,i);
+	  d2r(i,j)=neg_thetaJ_squared*deltax/(1.0+thetaJ*std::fabs(deltax))*
+	    drI(i,j);
+	}
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==2.5)) {
+    // *********************************************************************
+    // The MATERN 5/2 CORRELATION FUNCTION
+    //
+    // the 1st and 2nd derivatives with respect to xr of the 1D correlation
+    // function are defined, no special treatment is required
+    // *********************************************************************
+    double thetaJ=correlations(Jder,0);
+    double neg_thetaJ_squared=-thetaJ*thetaJ;
+    double deltax;
+    double thetaJ_abs_dx;
+    if(Ider==Jder) {
+      // taking the 2nd derivative of the 1D correlation function
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,i);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  d2r(i,j)=neg_thetaJ_squared*
+	    (1.0+thetaJ_abs_dx-thetaJ_abs_dx*thetaJ_abs_dx)/
+	    (3.0*(1.0+thetaJ_abs_dx)+thetaJ_abs_dx*thetaJ_abs_dx)*
+	    r(i,j);
+	}
+    }
+    else {
+      // taking the product of the 1st derivative of 2 independent 1D
+      // correlation functions
+      for(j=0; j<nptsxr; ++j)
+	for(i=0; i<numPointsKeep; ++i) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,i);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  d2r(i,j)=neg_thetaJ_squared*deltax*(1.0+thetaJ_abs_dx)/
+	    (3.0*(1.0+thetaJ_abs_dx)+thetaJ_abs_dx*thetaJ_abs_dx)*
+	    drI(i,j);
+	}
+    }
+  } else{
+    std::cerr << "unknown corrFunc in MtxDbl& KrigingModel::eval_kriging_d2correlation_matrix_dxIdxJ(MtxDbl& d2r, const MtxDbl& drI, const MtxDbl& r, const MtxDbl& xr, int Ider, int Jder) const\n";
+    assert(false);
+  }
+  return d2r;
+}
+/** Evaluate the 2nd derivative of the Gradient Enhanced Kriging (GEK)
+    correlation matrix with respect to evaluation point dimensions Ider and
+    Jder.  The GEK "r" interleaves, for each whole build point, one
+    function-value row followed by numVarsr rows of derivatives taken with
+    respect to XR (a trailing "partial" point may keep only numExtraDerKeep
+    of those derivative rows), so the k==Jder row of each point needs an
+    extra term / special treatment.  Reuses the previously computed r and
+    drI (= dr/dxr(Ider)) for speed.
+    Out: d2r is numRowsR by nptsxr (also returned by reference) */
+MtxDbl& KrigingModel::eval_gek_d2correlation_matrix_dxIdxJ(MtxDbl& d2r, const MtxDbl& drI, const MtxDbl& r, const MtxDbl& xr, int Ider, int Jder) const
+{
+  if(buildDerOrder!=1) {
+    // NOTE(review): this message names eval_gek_dcorrelation_matrix_dxI but
+    // we are in eval_gek_d2correlation_matrix_dxIdxJ; presumably the
+    // message text should be updated to match -- confirm before changing
+    std::cerr << "You should only call eval_gek_dcorrelation_matrix_dxI when you want to evaluate Gradient Enhanced Kriging's second derivative\n";
+    assert(buildDerOrder==1);
+  }
+  int nptsxr=xr.getNCols(); //points at which we are evaluating the model
+  d2r.newSize(numRowsR,nptsxr);
+
+#ifdef __KRIG_ERR_CHECK__
+  // NOTE(review): d2r is sized numRowsR by nptsxr but this assert requires
+  // r.getNRows()==numPointsKeep; for GEK, r normally has numRowsR rows --
+  // confirm whether numPointsKeep is intended here
+  assert((r.getNCols()==nptsxr)&&(r.getNRows()==numPointsKeep)&&
+	 (xr.getNRows()==numVarsr)&&(0<=Jder)&&(Jder<numVarsr));
+#endif
+
+  int i; //row index of r, d1r, & d2r
+  int j; //column index of r, d1r, & d2r; also the point index of xr
+  int k; //dimension index
+  int ipt; //point index of reordered XR
+  int neqn_per_pt=numVarsr+1; //1 function value row + numVarsr gradient rows
+  double deltax;
+
+  if(corrFunc==GAUSSIAN_CORR_FUNC) {
+    // *********************************************************************
+    // The GAUSSIAN CORRELATION FUNCTION
+    // is infinitely differentiable, i.e. is C^infinity continuous
+    // the reuse-lower-order-derivatives formulas are derived by taking
+    // derivatives in the reverse order of occurrence and not expanding
+    // derivatives of r or d1r (here called drI)
+    // *********************************************************************
+    double neg_two_thetaJ=-2.0*correlations(Jder,0);
+    if(numVarsr==1) {
+      // if there is only one input variable we are taking the 2nd derivative
+      // of the 1D correlation function
+      // AND WE KNOW THAT Ider=Jder=k so we don't have to "if" to add the
+      // the extra terms
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  d2r(i  ,j)=neg_two_thetaJ*(deltax*drI(i  ,j)+r(i  ,j));
+	  d2r(i+1,j)=neg_two_thetaJ*(deltax*drI(i+1,j)+r(i+1,j)-drI(i,j));
+	}
+	// since there is only one dimension if there is a partial point
+	// it will be a function value only, and actually recalculating it
+	// will likely be faster on average than checking if there's a
+	// partial point and calculating it if needed
+	ipt=numPointsKeep-1;
+	i=ipt*2;
+	d2r(i,j)=neg_two_thetaJ*
+	  ((xr(Jder,j)-XRreorder(Jder,ipt))*drI(i,j)+r(i,j));
+      }
+    } else if(Ider==Jder) {
+      // taking the 2nd derivative of the 1D correlation function
+      // the extra term is -2*theta(J)*r (remember r is for GEK so
+      // the k loop part of it contains derivatives of the Kriging
+      // correlation function with respect to XR)
+      //      std::cout << "size(r)=[" << r.getNRows() << "," << r.getNCols() << "]\n"
+      //		<< "size(drI)=[" << drI.getNRows() << "," << drI.getNCols() << "\n"
+      //	<< "size(d2r)=[" << d2r.getNRows() << "," << d2r.getNCols()
+      //	<< std::endl;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  d2r(i,j)=neg_two_thetaJ*(deltax*drI(i,j)+r(i,j));
+	  for(k=0; k<numVarsr; ++k) {
+	    //std::cout << "i=" << i << " j=" << j << " k=" << k << std::endl;
+	    d2r(i+1+k,j)=neg_two_thetaJ*(deltax*drI(i+1+k,j)+r(i+1+k,j));
+	  }
+	  d2r(i+1+Jder,j)-=neg_two_thetaJ*drI(i,j); //minus a negative is
+	  //a positive is correct, this extra term is for Jder=k
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should already have the values we need them to
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  d2r(i,j)=neg_two_thetaJ*(deltax*drI(i,j)+r(i,j));
+	  for(k=0; k<numExtraDerKeep; ++k)
+	    d2r(i+1+k,j)=neg_two_thetaJ*(deltax*drI(i+1+k,j)+r(i+1+k,j));
+	  if(Jder<numExtraDerKeep)
+	    d2r(i+1+Jder,j)-=neg_two_thetaJ*drI(i,j); //minus a negative is
+  	    //a positive is correct, this extra term is for Jder=k
+	}
+      }
+    } else {
+      // taking the product of the 1st derivative of 2 independent 1D
+      // correlation functions, (actually because this is for GEK, the
+      // k loop is 2nd derivative of the Kriging r, in dimensions
+      // independent of the one we're now taking the 1st derivative of)
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  d2r(i,j)=neg_two_thetaJ*deltax*drI(i,j);
+	  for(k=0; k<numVarsr; ++k)
+	    d2r(i+1+k,j)=neg_two_thetaJ*deltax*drI(i+1+k,j);
+	  d2r(i+1+Jder,j)-=neg_two_thetaJ*drI(i,j); //actually one element
+	  //of the k loop is the dimension we're taking a derivative with
+	  //respect to, it gets an extra term added to it. minus a negative
+	  //is a positive is correct, this extra term is for Jder=k
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should already have the values we need them to
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  d2r(i,j)=neg_two_thetaJ*deltax*drI(i,j);
+	  for(k=0; k<numExtraDerKeep; ++k)
+	    d2r(i+1+k,j)=neg_two_thetaJ*deltax*drI(i+1+k,j);
+	  if(Jder<numExtraDerKeep)
+	    d2r(i+1+Jder,j)-=neg_two_thetaJ*drI(i,j); //minus a negative is
+ 	    //a positive is correct, this extra term is for Jder=k
+	}
+      }
+    }
+  } else if(corrFunc==EXP_CORR_FUNC) {
+    std::cerr << "The exponential correlation function is not a valid correlation function for gradient enhanced Kriging\n";
+      assert(false);
+  } else if(corrFunc==POW_EXP_CORR_FUNC) {
+    std::cerr << "The powered exponential (with power < 2) correlation function is not a valid correlation function for gradient enhanced Kriging\n";
+      assert(false);
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==1.5)) {
+    // *********************************************************************
+    // The MATERN 3/2 CORRELATION FUNCTION
+    //
+    // the 1st derivative with respect to xr of the 1D correlation function
+    // is defined
+    //
+    // The 2nd derivative with respect to xr of the 1D correlation function
+    // *is undefined at xr==XR,
+    // *is -theta^2*(1-theta*|xr-XR|)*exp(-theta*|xr-XR|) at xr=/=XR
+    // *approaches -theta^2 from above and below
+    // when xr==XR we use the limit, -theta^2
+    // this follows the approach of
+    //   Lockwood, Brian A. and Anitescu, Mihai, "Gradient-Enhanced
+    //      Universal Kriging for Uncertainty Propagation"
+    //      Preprint ANL/MCS-P1808-1110
+    // *********************************************************************
+    double thetaJ=correlations(Jder,0);
+    double thetaJ_squared=thetaJ*thetaJ;
+    double thetaJ_abs_dx;
+    if(numVarsr==1) {
+      // if there is only one input variable we are taking the 2nd derivative
+      // of the 1D GEK correlation function (which contains first derivatives
+      // of the Kriging r with respect to XR) AND WE KNOW THAT Ider=Jder=k so
+      // we don't have to "if" to know when to give the 2nd derivative of
+      // GEK r = 3rd derivative of Kriging r, special treatment
+      double thetaJ_cubed=thetaJ_squared*thetaJ;
+      double r_div_matern_coef;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  r_div_matern_coef=r(i,j)/(1.0+thetaJ_abs_dx);
+	  d2r(i  ,j)=r_div_matern_coef*thetaJ_squared*(thetaJ_abs_dx-1.0);
+	  d2r(i+1,j)=r_div_matern_coef*thetaJ_cubed*dsign(deltax)*
+	    (thetaJ_abs_dx-2.0);
+	}
+	// since there is only one dimension if there is a partial point
+	// it will be a function value only, and actually recalculating it
+	// will likely be faster on average than checking if there's a
+	// partial point and calculating it if needed
+	ipt=numPointsKeep-1;
+	i=ipt*2;
+	thetaJ_abs_dx=thetaJ*std::fabs(xr(Jder,j)-XRreorder(Jder,ipt));
+	d2r(i,j)=r(i,j)/(1.0+thetaJ_abs_dx)*thetaJ_squared*(thetaJ_abs_dx-1.0);
+      }
+    } else if(Ider==Jder) {
+      // taking the 2nd derivative of the 1D correlation function of the GEK
+      // (not Kriging) r, which itself contains derivative of the Kriging r
+      // with respect to XR, but this 2nd derivative is independent of those
+      // first derivatives in all but one dimension
+      double thetaJ_cubed=thetaJ_squared*thetaJ;
+      double matern_coef;
+      double d2_mult_r;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  matern_coef=(1.0+thetaJ_abs_dx);
+	  d2_mult_r=thetaJ_squared*(thetaJ_abs_dx-1.0)/matern_coef;
+	  d2r(i,j)=r(i,j)*d2_mult_r;
+	  for(k=0; k<numVarsr; ++k) //this k loop assumes that the current
+	    //dimension is independent of the one that the XR derivative was
+	    //taken with respect to, it's correct for all but one k
+	    d2r(i+1+k,j)=r(i+1+k,j)*d2_mult_r;
+	  //rather than having an if inside the loop which is slow, we're just
+	  //going to reassign the d2r for k==Jder like this
+	  d2r(i+1+Jder,j)=r(i,j)* //indexes of r(i,j) are correct
+	    (thetaJ_cubed*dsign(deltax)*(thetaJ_abs_dx-2.0)/matern_coef);
+	}
+
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should already have the values we need them to
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  matern_coef=(1.0+thetaJ_abs_dx);
+	  d2_mult_r=thetaJ_squared*(thetaJ_abs_dx-1.0)/matern_coef;
+	  d2r(i,j)=r(i,j)*d2_mult_r;
+	  for(k=0; k<numExtraDerKeep; ++k) //this k loop assumes that the
+	    //current dimension is independent of the one that the XR
+	    //derivative was taken with respect to
+	    d2r(i+1+k,j)=r(i+1+k,j)*d2_mult_r;
+	  if(Jder<numExtraDerKeep) //if the dimension we're now taking a
+	    //derivative with respect to wasn't clipped from the partial point
+	    //we need to correct/reassign it for k==Jder
+	    d2r(i+1+Jder,j)=r(i,j)* //indexes of r(i,j) are correct
+	      (thetaJ_cubed*dsign(deltax)*(thetaJ_abs_dx-2.0)/matern_coef);
+	}
+      }
+    } else {
+      // taking the product of the 1st derivative (for GEK) of 2 independent
+      // 1D correlation functions (they're independent because Jder!=Ider).
+      // But since the GEK r contains first derivatives of the Kriging r,
+      // there is one dimension, k==Jder, that needs special treatment
+      double matern_coef;
+      double d1_mult_r;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  matern_coef=1.0+thetaJ_abs_dx;
+	  d1_mult_r=-thetaJ_squared*deltax/matern_coef;
+	  d2r(i,j)=drI(i,j)*d1_mult_r;
+	  for(k=0; k<numVarsr; ++k)  //this k loop assumes that the
+	    //current dimension is independent of the one that the XR
+	    //derivative was taken with respect to
+	    d2r(i+1+k,j)=drI(i+1+k,j)*d1_mult_r;
+	  //rather than having an if inside the loop which is slow, we're just
+	  //going to reassign the d2r for k==Jder like this
+	  d2r(i+1+Jder,j)=drI(i,j)* //indexes of drI(i,j) are correct
+	    thetaJ_squared*(1.0-thetaJ_abs_dx)/matern_coef; //sign is
+	    //opposite the numVarsr==1 d2r(i,j) because one of the 2
+	    //derivatives is taken with respect to XR instead of xr
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should already have the values we need them to
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  matern_coef=1.0+thetaJ_abs_dx;
+	  d1_mult_r=-thetaJ_squared*deltax/matern_coef;
+	  d2r(i,j)=drI(i,j)*d1_mult_r;
+	  for(k=0; k<numExtraDerKeep; ++k) //this k loop assumes that the
+	    //current dimension is independent of the one that the XR
+	    //derivative was taken with respect to
+	    d2r(i+1+k,j)=drI(i+1+k,j)*d1_mult_r;
+	  if(Jder<numExtraDerKeep)  //if the dimension we're now taking a
+	    //derivative with respect to wasn't clipped from the partial point
+	    //we need to correct/reassign it for k==Jder
+	    d2r(i+1+Jder,j)=drI(i,j)* //indexes of drI(i,j) are correct
+	      thetaJ_squared*(1.0-thetaJ_abs_dx)/matern_coef;
+	}
+      }
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==2.5)) {
+    // *********************************************************************
+    // The MATERN 5/2 CORRELATION FUNCTION
+    //
+    // the 1st and 2nd derivatives with respect to xr of the 1D correlation
+    // function are defined,
+    // 3rd derivative of Kriging r technically not defined at xr==XR (it is
+    // defined everywhere else) but the limit from both sides is defined and
+    // goes to zero at xr==XR (which means the limit from both sides agree)
+    // so we'll use the elsewhere-defined 3rd derivative even at xr==XR
+    // *********************************************************************
+    double thetaJ=correlations(Jder,0);
+    double thetaJ_squared=thetaJ*thetaJ;
+    double thetaJ_abs_dx;
+    if(numVarsr==1) {
+      // if there is only one input variable we are taking the 2nd derivative
+      // of the 1D GEK correlation function (which contains first derivatives
+      // of the Kriging r with respect to XR) AND WE KNOW THAT Ider=Jder=k so
+      // we don't have to "if" to know when to give the 2nd derivative of
+      // GEK r = 3rd derivative of Kriging r, special treatment
+      double r_thetaJ_squared_div_3_matern_coef;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=2) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  r_thetaJ_squared_div_3_matern_coef=r(i,j)*thetaJ_squared/
+	    (3.0*(1.0+thetaJ_abs_dx)+thetaJ_abs_dx*thetaJ_abs_dx);
+	  d2r(i  ,j)=r_thetaJ_squared_div_3_matern_coef*
+	    -(1.0+thetaJ_abs_dx-thetaJ_abs_dx*thetaJ_abs_dx);
+	  d2r(i+1,j)=r_thetaJ_squared_div_3_matern_coef*
+	    -thetaJ_squared*deltax*(3.0-thetaJ_abs_dx);
+	}
+	// since there is only one dimension, if there is a partial point
+	// it will be a function value only, and actually recalculating it
+	// will likely be faster on average than checking if there's a
+	// partial point and calculating it if needed
+	ipt=numPointsKeep-1;
+	i=ipt*2;
+	thetaJ_abs_dx=thetaJ*std::fabs(xr(Jder,j)-XRreorder(Jder,ipt));
+	d2r(i,j)=r(i,j)*thetaJ_squared/
+	  (3.0*(1.0+thetaJ_abs_dx)+thetaJ_abs_dx*thetaJ_abs_dx)*
+	  -(1.0+thetaJ_abs_dx-thetaJ_abs_dx*thetaJ_abs_dx);
+      }
+    } else if(Ider==Jder) {
+      // taking the 2nd derivative of the 1D correlation function of the GEK
+      // (not Kriging) r, which itself contains derivative of the Kriging r
+      // with respect to XR, but this 2nd derivative is independent of those
+      // first derivatives in all but one dimension
+      double neg_thetaJ_squared_div_3_matern_coef;
+      double d2_mult_r;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  neg_thetaJ_squared_div_3_matern_coef=-thetaJ_squared/
+	    (3.0*(1.0+thetaJ_abs_dx)+thetaJ_abs_dx*thetaJ_abs_dx);
+	  d2_mult_r=neg_thetaJ_squared_div_3_matern_coef*
+	    (1.0+thetaJ_abs_dx-thetaJ_abs_dx*thetaJ_abs_dx);
+	  d2r(i,j)=r(i,j)*d2_mult_r;
+	  for(k=0; k<numVarsr; ++k) //this k loop assumes that the current
+	    //dimension is independent of the one that the XR derivative was
+	    //taken with respect to, it's correct for all but one k
+	    d2r(i+1+k,j)=r(i+1+k,j)*d2_mult_r;
+	  //rather than having an if inside the loop which is slow, we're just
+	  //going to reassign the d2r for k==Jder like this
+	  d2r(i+1+Jder,j)=r(i,j)* //indexes of r(i,j) are correct
+	    neg_thetaJ_squared_div_3_matern_coef*
+	    thetaJ_squared*deltax*(3.0-thetaJ_abs_dx);
+	}
+
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should already have the values we need them to
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  neg_thetaJ_squared_div_3_matern_coef=-thetaJ_squared/
+	    (3.0*(1.0+thetaJ_abs_dx)+thetaJ_abs_dx*thetaJ_abs_dx);
+	  d2_mult_r=neg_thetaJ_squared_div_3_matern_coef*
+	    (1.0+thetaJ_abs_dx-thetaJ_abs_dx*thetaJ_abs_dx);
+	  d2r(i,j)=r(i,j)*d2_mult_r;
+	  for(k=0; k<numExtraDerKeep; ++k) //this k loop assumes that the
+	    //current dimension is independent of the one that the XR
+	    //derivative was taken with respect to
+	    d2r(i+1+k,j)=r(i+1+k,j)*d2_mult_r;
+	  if(Jder<numExtraDerKeep) //if the dimension we're now taking a
+	    //derivative with respect to wasn't clipped from the partial point
+	    //we need to correct/reassign it for k==Jder
+	    d2r(i+1+Jder,j)=r(i,j)* //indexes of r(i,j) are correct
+	      neg_thetaJ_squared_div_3_matern_coef*
+	      thetaJ_squared*deltax*(3.0-thetaJ_abs_dx);
+	}
+      }
+    } else {
+      // taking the product of the 1st derivative (for GEK) of 2 independent
+      // 1D correlation functions (they're independent because Jder!=Ider).
+      // But since the GEK r contains first derivatives of the Kriging r,
+      // there is one dimension, k==Jder, that needs special treatment
+      double thetaJ_squared_div_3_matern_coef;
+      double d1_mult_r;
+      for(j=0; j<nptsxr; ++j) {
+	for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt, i+=neqn_per_pt) {
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  thetaJ_squared_div_3_matern_coef=thetaJ_squared/
+	    (3.0*(1.0+thetaJ_abs_dx)+thetaJ_abs_dx*thetaJ_abs_dx);
+	  d1_mult_r=-thetaJ_squared_div_3_matern_coef*
+	    deltax*(1.0+thetaJ_abs_dx);
+	  d2r(i,j)=drI(i,j)*d1_mult_r;
+	  for(k=0; k<numVarsr; ++k)  //this k loop assumes that the
+	    //current dimension is independent of the one that the XR
+	    //derivative was taken with respect to
+	    d2r(i+1+k,j)=drI(i+1+k,j)*d1_mult_r;
+	  //rather than having an if inside the loop which is slow, we're just
+	  //going to reassign the d2r for k==Jder like this
+	  d2r(i+1+Jder,j)=drI(i,j)* //indexes of drI(i,j) are correct
+	    thetaJ_squared_div_3_matern_coef*
+	    (1.0+thetaJ_abs_dx-thetaJ_abs_dx*thetaJ_abs_dx); //sign is
+	    //opposite the numVarsr==1 d2r(i,j) because one of the 2
+	    //derivatives is taken with respect to XR instead of xr
+	}
+	if(numPointsKeep>numWholePointsKeep) {
+	  //ipt and i should already have the values we need them to
+#ifdef __KRIG_ERR_CHECK__
+	  assert((ipt==numWholePointsKeep)&&
+		 (ipt==numPointsKeep-1)&&
+		 (i==neqn_per_pt*numWholePointsKeep));
+#endif
+	  deltax=xr(Jder,j)-XRreorder(Jder,ipt);
+	  thetaJ_abs_dx=thetaJ*std::fabs(deltax);
+	  thetaJ_squared_div_3_matern_coef=thetaJ_squared/
+	    (3.0*(1.0+thetaJ_abs_dx)+thetaJ_abs_dx*thetaJ_abs_dx);
+	  d1_mult_r=-thetaJ_squared_div_3_matern_coef*
+	    deltax*(1.0+thetaJ_abs_dx);
+	  d2r(i,j)=drI(i,j)*d1_mult_r;
+	  for(k=0; k<numExtraDerKeep; ++k) //this k loop assumes that the
+	    //current dimension is independent of the one that the XR
+	    //derivative was taken with respect to
+	    d2r(i+1+k,j)=drI(i+1+k,j)*d1_mult_r;
+	  if(Jder<numExtraDerKeep)  //if the dimension we're now taking a
+	    //derivative with respect to wasn't clipped from the partial point
+	    //we need to correct/reassign it for k==Jder
+	    d2r(i+1+Jder,j)=drI(i,j)* //indexes of drI(i,j) are correct
+	      thetaJ_squared_div_3_matern_coef*
+	      (1.0+thetaJ_abs_dx-thetaJ_abs_dx*thetaJ_abs_dx);
+	}
+      }
+    }
+  } else{
+    std::cerr << "unknown corrFunc in MtxDbl& KrigingModel::eval_gek_d2correlation_matrix_dxIdxJ(MtxDbl& d2r, const MtxDbl& drI, const MtxDbl& r, const MtxDbl& xr, int Ider, int Jder) const\n";
+    assert(false);
+  }
+  return d2r;
+}
+
+
+
+/** this function is typically used during emulator construction; the below-
+    diagonal portion of R = exp(Z^T*theta), where R is symmetric with 1's
+    on the diagonal, theta is the vector of correlations and the Z matrix is
+    defined as Z(k,ij)=-(XR(k,i)-XR(k,j))^2 where ij counts downward within
+    columns of R starting from the element below the diagonal and continues
+    from one column to the next, Z^T*theta is matrix vector multiplication to
+    be performed efficiently by BLAS, V=Z^T*theta is a vector with
+    nchoosek(numPoints,2) elements.  We need to copy exp(V(ij)) to R(i,j)
+    and R(j,i) to produce R. The Z matrix is produced by
+    KrigingModel::gen_Z_matrix()     KRD wrote this */
+void KrigingModel::correlation_matrix(const MtxDbl& theta)
+{
+  int ncolsZ=Z.getNCols();
+  //printf("nrowsZ=%d; numPoints=%d; ''half'' numPoints^2=%d; numVarsr=%d; theta.getNRows()=%d\n",
+  //	 ncolsZ,numPoints,nchoosek(numPoints,2),numVarsr,theta.getNRows());
+  //fflush(stdout);
+#ifdef __KRIG_ERR_CHECK__
+  assert((ncolsZ==nchoosek(numPoints,2))&&
+	 (numVarsr==Z.getNRows())&&
+	 (numVarsr==theta.getNRows())&&
+	 (1==theta.getNCols()));
+#endif
+
+  Ztran_theta.newSize(ncolsZ,1); //Z transpose because subsequent access of a
+  //column vector should be marginally faster than a row vector
+  matrix_mult(Ztran_theta,Z,theta,0.0,1.0,'T','N');
+
+  if(buildDerOrder==0)
+    numRowsR=numPoints;
+  else if(buildDerOrder==1)
+    numRowsR=numPoints*nDer;
+  else{
+    std::cerr << "buildDerOrder=" << buildDerOrder << " in void KrigingModel::correlation_matrix(const MtxDbl& theta).  It must either be 0 for Kriging or 1 for Gradient Enhanced Kriging.  Higher order build derivatives, (e.g. Hessian Enhanced Kriging) have not been implemented." << std::endl;
+    assert(false);
+  }
+  R.newSize(numRowsR,numRowsR);
+
+  //Do the regular (Der0) Kriging Portion of the Correlation matrix first
+  double Rij_temp;
+  int ij=0;
+  if((corrFunc==GAUSSIAN_CORR_FUNC)||
+     (corrFunc==EXP_CORR_FUNC)||
+     (corrFunc==POW_EXP_CORR_FUNC)) {
+    for(int j=0; j<numPoints-1; ++j) {
+      R(j,j)=1.0;
+      for(int i=j+1; i<numPoints; ++i, ++ij) {
+	Rij_temp=std::exp(Ztran_theta(ij,0));
+	R(i,j)=Rij_temp;
+	R(j,i)=Rij_temp;
+      }
+    }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==1.5)){
+    //for matern Z(k,ij)=-|XR(k,i)-XR(k,j)| we want to feed
+    //theta(k,0)*|XR(k,i)-XR(k,j)| to matern_1pt5_coef so we need to
+    //negate the already negative quantity
+    if(numVarsr==1)
+      for(int j=0; j<numPoints-1; ++j) {
+	R(j,j)=1.0;
+	for(int i=j+1; i<numPoints; ++i, ++ij) {
+	  Rij_temp=std::exp(Ztran_theta(ij,0))*
+	    matern_1pt5_coef(-Ztran_theta(ij,0));
+	  R(i,j)=Rij_temp;
+	  R(j,i)=Rij_temp;
+	}
+      }
+    else
+      for(int j=0; j<numPoints-1; ++j) {
+	R(j,j)=1.0;
+	for(int i=j+1; i<numPoints; ++i, ++ij) {
+	  Rij_temp=std::exp(Ztran_theta(ij,0))*
+	    matern_1pt5_coef(-Z(0,ij)*theta(0,0));
+	  for(int k=1; k<numVarsr; ++k)
+	    Rij_temp*=matern_1pt5_coef(-Z(k,ij)*theta(k,0));
+	  R(i,j)=Rij_temp;
+	  R(j,i)=Rij_temp;
+	}
+      }
+  } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==2.5)){
+    //for matern Z(k,ij)=-|XR(k,i)-XR(k,j)| we want to feed
+    //theta(k,0)*|XR(k,i)-XR(k,j)| to matern_2pt5_coef so we need to
+    //negate the already negative quantity
+    if(numVarsr==1)
+      for(int j=0; j<numPoints-1; ++j) {
+	R(j,j)=1.0;
+	for(int i=j+1; i<numPoints; ++i, ++ij) {
+	  Rij_temp=std::exp(Ztran_theta(ij,0))*
+	    matern_2pt5_coef(-Ztran_theta(ij,0));
+	  R(i,j)=Rij_temp;
+	  R(j,i)=Rij_temp;
+	}
+      }
+    else
+      for(int j=0; j<numPoints-1; ++j) {
+	R(j,j)=1.0;
+	for(int i=j+1; i<numPoints; ++i, ++ij) {
+	  Rij_temp=std::exp(Ztran_theta(ij,0))*
+	    matern_2pt5_coef(-Z(0,ij)*theta(0,0));
+	  for(int k=1; k<numVarsr; ++k)
+	    Rij_temp*=matern_2pt5_coef(-Z(k,ij)*theta(k,0));
+	  R(i,j)=Rij_temp;
+	  R(j,i)=Rij_temp;
+	}
+      }
+  }else{
+    std::cerr << "unknown corrFunc in void KrigingModel::correlation_matrix(const MtxDbl& theta)\n";
+    assert(false);
+  }
+  R(numPoints-1,numPoints-1)=1.0;
+
+  /*
+  FILE *fp=fopen("km_Rmat_check.txt","w");
+  for(int i=0; i<numPoints; ++i) {
+      fprintf(fp,"%-12.6g", R(i,0));
+      for(int j=1; j<numPoints; ++j)
+	fprintf(fp," %-12.6g", R(i,j));
+      fprintf(fp,"\n");
+  }
+  fclose(fp);
+  */
+
+  if(buildDerOrder>0) {
+    //Gaussian Matern1.5 and Matern2.5 are valid correlation functions for
+    //Gradient Enhanced Kriging
+    double temp_double;
+    int zij, j;
+
+
+    if(corrFunc==GAUSSIAN_CORR_FUNC) {
+      //now handle the first order derivative submatrices, individually the first
+      //order derivative SUBmatrices are anti-symmetric but the whole matrix is
+      //symmetric
+      int Ii, Ij, Jj, Ji; //first letter identifies index OF derivative submatrix
+      //second letter identifies index INTO derivative SUBmatrix
+      for(int Ider=0; Ider<numVarsr; ++Ider) {
+	zij=0;
+	double two_theta_Ider=2.0*theta(Ider,0);
+	for(j=0; j<numPoints-1; ++j) {//j<numPoints-1 avoids an i loop of length 0
+	  //diagonal (_j,_j) of off diagonal (I_, _) submatrix
+	  Ij=(Ider+1)*numPoints+j;
+	  R(Ij, j)=0.0;
+	  R( j,Ij)=0.0;
+	  //Ij=(Ider+1)*numPoints+j;
+	  for(int i=j+1; i<numPoints; ++i, ++zij) {
+	    //off diagonal (_i,_j) of off-diagonal (I_, _) submatrix
+	    Ii=(Ider+1)*numPoints+i;
+	    temp_double=-two_theta_Ider*deltaXR(zij,Ider)*R( i, j);
+	    //here  temp_double=
+	    //                  R(Ii, j) = dR(i,j)/dXR1(Ider,i)
+	    //                  R( j,Ii) = dR(i,j)/dXR2(Ider,j)
+	    //and  -temp_double=
+	    //                  R(Ij, i) = dR(i,j)/dXR1(Ider,j)
+	    //                  R( i,Ij) = dR(i,j)/dXR2(Ider,i)
+	    //where XR1 is the first argument of the correlation function
+	    //and XR2 is the second argument of the correlation function
+	    R(Ii, j)= temp_double;
+	    R( j,Ii)= temp_double; //whole R matrix is symmetric
+	    R(Ij, i)=-temp_double;
+	    R( i,Ij)=-temp_double; //off-diagonal 1st order (actually all odd
+	    //order) derivative SUBmatrices are anti-symmetric
+	  }
+	}
+	//diagonal (_j,_j) of off diagonal (I_, _) submatrix
+	j=numPoints-1; //avoids an i loop of length 0
+	Ij=(Ider+1)*numPoints+j;
+	R(Ij,j)=0.0;
+	R(j,Ij)=0.0;
+      }
+
+      //note that all 2nd order (actually all even order) derivative SUBmatrices
+      //are symmetric because the Hadamard product of 2 (actually any even
+      //number of) anti-symmetric matrices is a symmetric matrix
+      double two_theta_Jder;
+      for(int Jder=0; Jder<numVarsr; ++Jder) {
+	//do the on diagonal (J_,J_) submatrix
+	two_theta_Jder=2.0*theta(Jder,0);
+	zij=0;
+	for(j=0; j<numPoints-1; ++j) { //j<numPoints-1 avoids an i loop of length 0
+	  //diagonal (_j,_j) of on diagonal (J_,J_) submatrix
+	  Jj=(Jder+1)*numPoints+j;
+	  R(Jj,Jj)=two_theta_Jder; //R(Jj,Jj)=2*theta(Jder,0)*R(j,j); R(j,j)=1;
+	  for(int i=j+1; i<numPoints; ++i) {
+	    //off diagonal (_i,_j) of on-diagonal (J_,J_) submatrix
+	    Ji=(Jder+1)*numPoints+i;
+	    temp_double=two_theta_Jder*deltaXR(zij,Jder)*R(Ji, j)+
+	    two_theta_Jder*R( i, j);
+	    R(Ji,Jj)=temp_double;
+	    R(Jj,Ji)=temp_double;
+	    ++zij;
+	  }
+	}
+	//diagonal (_j,_j) of on diagonal (J_,J_) submatrix
+	j=numPoints-1; //avoids an i loop of length 0
+	Jj=(Jder+1)*numPoints+j;
+	R(Jj,Jj)=two_theta_Jder; //R(j,j)=1 R(Jj,Jj)=2*theta(Jder,0)*R(j,j)
+
+
+	//do the off diagonal (I_,J_) submatrices
+	for(int Ider=Jder+1; Ider<numVarsr; ++Ider) {
+	  //off diagonal (I_,J_) submatrix
+	  zij=0;
+	  for(j=0; j<numPoints-1; ++j) {//j<numPoints-1 avoids an i loop of length 0
+	    //diagonal (_j,_j) of off-diagonal (I_,J_) submatrix
+	    Jj=(Jder+1)*numPoints+j;
+	    Ij=(Ider+1)*numPoints+j;
+	    R(Ij,Jj)=0.0;
+	    R(Jj,Ij)=0.0;
+
+
+	    for(int i=j+1; i<numPoints; ++i) {
+	      //off diagonal (_i,_j) of off-diagonal (I_,J_) submatrix
+	      Ii=(Ider+1)*numPoints+i;
+	      Ji=(Jder+1)*numPoints+i;
+	      temp_double=two_theta_Jder*deltaXR(zij,Jder)*R(Ii, j);
+	      R(Ii,Jj)= temp_double;
+	      R(Ij,Ji)= temp_double;
+	      R(Ji,Ij)= temp_double;
+	      R(Jj,Ii)= temp_double;
+	      ++zij;
+	    }
+	  }
+	  //diagonal (_j,_j) of off-diagonal (I_,J_) submatrix
+	  j=numPoints-1; //avoids an i loop of length 0
+	  Ij=(Ider+1)*numPoints+j;
+	  Jj=(Jder+1)*numPoints+j;
+	  R(Ij,Jj)=0.0;
+	  R(Jj,Ij)=0.0;
+	}
+      }
+    } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==1.5)) {
+      //The second derivative of the Matern1.5 correlation function
+      //is not strictly defined at XR(i,Ider)==XR(j,Jder) but the limit
+      //of the second derivative from both sides is defined and is the same
+      //this follows
+      //Lockwood, Brian A. and Anitescu, Mihai, "Gradient-Enhanced
+      //    Universal Kriging for Uncertainty Propagation"
+      //    Preprint ANL/MCS-P1808-1110
+      //
+      //d2r_dXIdXJ with Ider==Jder
+      // = theta^2*exp(-theta*|XI-XJ|)-theta^3*|XI-XJ|*exp(-theta*|XI-XJ|)
+      // = -theta^2*(1-2/matern_1pt5_coef)*r(XI,XJ)
+      // = -matern_1pt5_d2_mult_r(theta,+/-(XI-XJ))*r(XI,XJ) (note the
+      //    negative sign, it should be here, but does not appear when
+      //    evaluating the 2nd derivative of GP, because there it is the second
+      //    derivative with respect to the SAME argument, here it is the
+      //    second derivative with respect to different arguments)
+
+      //now handle the first order derivative submatrices, individually the first
+      //order derivative SUBmatrices are anti-symmetric but the whole matrix is
+      //symmetric
+      int Ii, Ij, Jj, Ji; //first letter identifies index OF derivative
+      //submatrix second letter identifies index INTO derivative SUBmatrix
+      for(int Ider=0; Ider<numVarsr; ++Ider) {
+	zij=0;
+	double theta_Ider=theta(Ider,0);
+	for(j=0; j<numPoints-1; ++j) {//j<numPoints-1 avoids an i loop of
+	  //length 0
+
+	  //diagonal (_j,_j) of off diagonal (I_, _) submatrix
+	  Ij=(Ider+1)*numPoints+j;
+	  R(Ij, j)=0.0;
+	  R( j,Ij)=0.0;
+	  //Ij=(Ider+1)*numPoints+j;
+	  for(int i=j+1; i<numPoints; ++i) {
+	    //off diagonal (_i,_j) of off-diagonal (I_, _) submatrix
+	    Ii=(Ider+1)*numPoints+i;
+	    temp_double=
+	      matern_1pt5_d1_mult_r(theta_Ider,deltaXR(zij,Ider))*R( i, j);
+	    R(Ii, j)= temp_double;
+	    R( j,Ii)= temp_double; //whole R matrix is symmetric
+	    R(Ij, i)=-temp_double;
+	    R( i,Ij)=-temp_double; //off-diagonal 1st order (actually all odd
+	    //order) derivative SUBmatrices are anti-symmetric
+	    ++zij;
+	  }
+	}
+	//diagonal (_j,_j) of off diagonal (I_, _) submatrix
+	j=numPoints-1; //avoids an i loop of length 0
+	Ij=(Ider+1)*numPoints+j;
+	R(Ij, j)=0.0;
+	R( j,Ij)=0.0;
+      }
+
+      //note that all 2nd order (actually all even order) derivative SUBmatrices
+      //are symmetric because the Hadamard product of 2 (actually any even
+      //number of) anti-symmetric matrices is a symmetric matrix
+      double theta_Jder;
+      double theta_Jder_squared;
+      for(int Jder=0; Jder<numVarsr; ++Jder) {
+	//do the on diagonal (J_,J_) submatrix
+	theta_Jder=theta(Jder,0);
+	theta_Jder_squared=theta_Jder*theta_Jder;
+	zij=0;
+	for(j=0; j<numPoints-1; ++j) { //j<numPoints-1 avoids an i loop of length 0
+	  //diagonal (_j,_j) of on diagonal (J_,J_) submatrix
+	  Jj=(Jder+1)*numPoints+j;
+	  R(Jj,Jj)=theta_Jder_squared;
+	  for(int i=j+1; i<numPoints; ++i) {
+	    //off diagonal (_i,_j) of on-diagonal (J_,J_) submatrix
+	    Ji=(Jder+1)*numPoints+i;
+	    temp_double=//neg sign because d^2/dXR1dXR2 instead of d^2/dXR1^2
+	      -matern_1pt5_d2_mult_r(theta_Jder,deltaXR(zij,Jder))*R( i, j);
+	    R(Ji,Jj)=temp_double;
+	    R(Jj,Ji)=temp_double;
+	    ++zij;
+	  }
+	}
+	//diagonal (_j,_j) of on diagonal (J_,J_) submatrix
+	j=numPoints-1; //avoids an i loop of length 0
+	Jj=(Jder+1)*numPoints+j;
+	R(Jj,Jj)=theta_Jder_squared;
+
+
+	//do the off diagonal (I_,J_) submatrices
+	for(int Ider=Jder+1; Ider<numVarsr; ++Ider) {
+	  //off diagonal (I_,J_) submatrix
+	  zij=0;
+	  for(j=0; j<numPoints-1; ++j) {//j<numPoints-1 avoids an i loop of length 0
+	    //diagonal (_j,_j) of off-diagonal (I_,J_) submatrix
+	    Jj=(Jder+1)*numPoints+j;
+	    Ij=(Ider+1)*numPoints+j;
+	    R(Ij,Jj)=0.0;
+	    R(Jj,Ij)=0.0;
+
+	    for(int i=j+1; i<numPoints; ++i) {
+	      //off diagonal (_i,_j) of off-diagonal (I_,J_) submatrix
+	      Ii=(Ider+1)*numPoints+i;
+	      Ji=(Jder+1)*numPoints+i;
+	      temp_double=
+		matern_1pt5_d1_mult_r(theta_Jder,-deltaXR(zij,Jder))*R(Ii, j);
+	      R(Ii,Jj)= temp_double;
+	      R(Ij,Ji)= temp_double;
+	      R(Ji,Ij)= temp_double;
+	      R(Jj,Ii)= temp_double;
+	      ++zij;
+	    }
+	  }
+	  //diagonal (_j,_j) of off-diagonal (I_,J_) submatrix
+	  j=numPoints-1; //avoids an i loop of length 0
+	  Ij=(Ider+1)*numPoints+j;
+	  Jj=(Jder+1)*numPoints+j;
+	  R(Ij,Jj)=0.0;
+	  R(Jj,Ij)=0.0;
+	}
+      }
+
+    } else if((corrFunc==MATERN_CORR_FUNC)&&(maternCorrFuncNu==2.5)) {
+      //now handle the first order derivative submatrices, individually the first
+      //order derivative SUBmatrices are anti-symmetric but the whole matrix is
+      //symmetric
+      int Ii, Ij, Jj, Ji; //first letter identifies index OF derivative
+      //submatrix second letter identifies index INTO derivative SUBmatrix
+      for(int Ider=0; Ider<numVarsr; ++Ider) {
+	zij=0;
+	double theta_Ider=theta(Ider,0);
+	for(j=0; j<numPoints-1; ++j) {//j<numPoints-1 avoids an i loop of
+	  //length 0
+
+	  //diagonal (_j,_j) of off diagonal (I_, _) submatrix
+	  Ij=(Ider+1)*numPoints+j;
+	  R(Ij, j)=0.0;
+	  R( j,Ij)=0.0;
+	  //Ij=(Ider+1)*numPoints+j;
+	  for(int i=j+1; i<numPoints; ++i) {
+	    //off diagonal (_i,_j) of off-diagonal (I_, _) submatrix
+	    Ii=(Ider+1)*numPoints+i;
+	    temp_double=
+	      matern_2pt5_d1_mult_r(theta_Ider,deltaXR(zij,Ider))*R( i, j);
+	    R(Ii, j)= temp_double;
+	    R( j,Ii)= temp_double; //whole R matrix is symmetric
+	    R(Ij, i)=-temp_double;
+	    R( i,Ij)=-temp_double; //off-diagonal 1st order (actually all odd
+	    //order) derivative SUBmatrices are anti-symmetric
+	    ++zij;
+	  }
+	}
+	//diagonal (_j,_j) of off diagonal (I_, _) submatrix
+	j=numPoints-1; //avoids an i loop of length 0
+	Ij=(Ider+1)*numPoints+j;
+	R(Ij, j)=0.0;
+	R( j,Ij)=0.0;
+      }
+
+      //note that all 2nd order (actually all even order) derivative SUBmatrices
+      //are symmetric because the Hadamard product of 2 (actually any even
+      //number of) anti-symmetric matrices is a symmetric matrix
+      double theta_Jder;
+      double theta_Jder_squared_div_3;
+      for(int Jder=0; Jder<numVarsr; ++Jder) {
+	//do the on diagonal (J_,J_) submatrix
+	theta_Jder=theta(Jder,0);
+	theta_Jder_squared_div_3=theta_Jder*theta_Jder/3.0;
+	zij=0;
+	for(j=0; j<numPoints-1; ++j) { //j<numPoints-1 avoids an i loop of length 0
+	  //diagonal (_j,_j) of on diagonal (J_,J_) submatrix
+	  Jj=(Jder+1)*numPoints+j;
+	  R(Jj,Jj)=theta_Jder_squared_div_3;
+	  for(int i=j+1; i<numPoints; ++i) {
+	    //off diagonal (_i,_j) of on-diagonal (J_,J_) submatrix
+	    Ji=(Jder+1)*numPoints+i;
+	    temp_double=//neg sign because d^2/dXR1dXR2 instead of d^2/dXR1^2
+	      -matern_2pt5_d2_mult_r(theta_Jder,deltaXR(zij,Jder))*R( i, j);
+	    R(Ji,Jj)=temp_double;
+	    R(Jj,Ji)=temp_double;
+	    ++zij;
+	  }
+	}
+	//diagonal (_j,_j) of on diagonal (J_,J_) submatrix
+	j=numPoints-1; //avoids an i loop of length 0
+	Jj=(Jder+1)*numPoints+j;
+	R(Jj,Jj)=theta_Jder_squared_div_3;
+
+
+      	//do the off diagonal (I_,J_) submatrices
+	for(int Ider=Jder+1; Ider<numVarsr; ++Ider) {
+	  //off diagonal (I_,J_) submatrix
+	  zij=0;
+	  for(j=0; j<numPoints-1; ++j) {//j<numPoints-1 avoids an i loop of length 0
+	    //diagonal (_j,_j) of off-diagonal (I_,J_) submatrix
+	    Jj=(Jder+1)*numPoints+j;
+	    Ij=(Ider+1)*numPoints+j;
+	    R(Ij,Jj)=0.0;
+	    R(Jj,Ij)=0.0;
+
+	    for(int i=j+1; i<numPoints; ++i) {
+	      //off diagonal (_i,_j) of off-diagonal (I_,J_) submatrix
+	      Ii=(Ider+1)*numPoints+i;
+	      Ji=(Jder+1)*numPoints+i;
+	      temp_double=
+		matern_2pt5_d1_mult_r(theta_Jder,-deltaXR(zij,Jder))*R(Ii, j);
+	      R(Ii,Jj)= temp_double;
+	      R(Ij,Ji)= temp_double;
+	      R(Ji,Ij)= temp_double;
+	      R(Jj,Ii)= temp_double;
+	      ++zij;
+	    }
+	  }
+	  //diagonal (_j,_j) of off-diagonal (I_,J_) submatrix
+	  j=numPoints-1; //avoids an i loop of length 0
+	  Ij=(Ider+1)*numPoints+j;
+	  Jj=(Jder+1)*numPoints+j;
+	  R(Ij,Jj)=0.0;
+	  R(Jj,Ij)=0.0;
+	}
+      }
+    } else{
+      std::cerr << "Unknown or Invalid Correlation function for Gradient Enhanced Kriging in void KrigingModel::correlation_matrix(const MtxDbl& theta)\n";
+      assert(false);
+    }
+    /*
+    printf("theta=[%14.8g",theta(0,0));
+    for(int k=1; k<numVarsr; ++k)
+      printf(" %14.8g",theta(k,0));
+    printf("]^T\n");
+    printf("M3/2 GEK R=\n");
+    for(int i=0; i<numEqnAvail; ++i) {
+      for(int j=0; j<numEqnAvail; ++j)
+	printf("%14.8g ",R(i,j));
+      printf("\n");
+    }
+    printf("\n\n");
+    */
+
+  }
+
+  return;
+}
+
+/** the Z matrix is defined as Z(k,ij)=-(XR(i,k)-XR(j,k))^2 where
+    ij=i+j*XR.getNRows(), it enables the efficient repeated calculation
+    of the R matrix during model construction:
+    R=reshape(exp(Z*theta),XR.getNRows(),XR.getNRows()) where theta is
+    the vector of correlations and * is matrix vector multiplication,
+    note that the Z matrix is independent of the correlation vector so
+    it can be formed once and later during the search for a good
+    correlation vector, the matrix vector product Z*theta can be
+    performed efficiently (for each member of the set of candidate
+    theta vectors) by calling BLAS. Z and XR are member variables so
+    they don't need to be passed in, KRD wrote this,  */
+MtxDbl& KrigingModel::gen_Z_matrix()
+{
+#ifdef __KRIG_ERR_CHECK__
+  assert((XR.getNRows()==numVarsr)&&(XR.getNCols()==numPoints));
+#endif
+  int ncolsZ=nchoosek(numPoints,2);
+  Z.newSize(numVarsr,ncolsZ);
+
+  if(buildDerOrder>0) {
+    //deltaXR is only needed for GEK
+    deltaXR.newSize(ncolsZ,numVarsr); //this ordering (transpose of Z)
+    //is useful for constructing the GEK R matrix
+  }
+
+  int ij=0;
+  if(corrFunc==GAUSSIAN_CORR_FUNC)  {
+    // ****************************************************************
+    // The Gaussian Correlation Function
+    // can be used for Gradient Enhanced Kriging (GEK)
+    // (or more generally, for derivative enhanced Kriging) because
+    // it is C infinity continuous.
+    // It uses Z(k,ij) = -(XR(k,i)-XR(k,j))^2
+    // if GEK is used we will also compute and store
+    // deltaXR(ij,k) = XR(k,i)-XR(k,j), the order of indexes is correct
+    // this transposed ordering is useful for efficient computation of
+    // the GEK R matrix (using the SUBmatrix construction process)
+    // ****************************************************************
+    double dXR; //a temporary variable to make squaring easier
+    if(buildDerOrder>0)
+      for(int j=0; j<numPoints-1; ++j)
+	for(int i=j+1; i<numPoints; ++i, ++ij)
+	  for(int k=0; k<numVarsr; k++) {
+	    dXR=XR(k,i)-XR(k,j);
+	    deltaXR(ij,k)=dXR;
+	    Z(k,ij)=-dXR*dXR;
+	  }
+    else
+      for(int j=0; j<numPoints-1; ++j)
+	for(int i=j+1; i<numPoints; ++i, ++ij)
+	  for(int k=0; k<numVarsr; k++) {
+	    dXR=XR(k,i)-XR(k,j);
+	    Z(k,ij)=-dXR*dXR;
+	  }
+  } else if((corrFunc==EXP_CORR_FUNC)||(corrFunc==MATERN_CORR_FUNC)) {
+    // ****************************************************************
+    // The Exponential and Matern 3/2 and 5/2 Correlation Functions
+    // all use Z(k,ij) = -|XR(k,i)-XR(k,j)|
+    // the Exponential Correlation Function
+    //     can NOT be used for Gradient Enhanced Kriging (GEK)
+    // the Matern 3/2 and 5/2 Correlation Functions
+    //     CAN be used for Gradient Enhanced Kriging (GEK)
+    // if GEK is used we will also compute and store
+    //     deltaXR(ij,k) = XR(k,i)-XR(k,j), the order of indexes is
+    //     correct this transposed ordering is useful for efficient
+    //     computation of the GEK R matrix (using the SUBmatrix
+    //     construction process)
+    // ****************************************************************
+    if(buildDerOrder>0) {
+      if(corrFunc==EXP_CORR_FUNC) {
+	std::cerr << "the exponential correlation function is not a valid choice for Gradient Enhanced Kriging\n";
+	assert(!((corrFunc==EXP_CORR_FUNC)&&(buildDerOrder>0)));
+      }
+      for(int j=0; j<numPoints-1; ++j)
+	for(int i=j+1; i<numPoints; ++i, ++ij)
+	  for(int k=0; k<numVarsr; k++) {
+	    deltaXR(ij,k)=XR(k,i)-XR(k,j);
+	    Z(k,ij)=-std::fabs(deltaXR(ij,k));
+	  }
+    } else
+      for(int j=0; j<numPoints-1; ++j)
+	for(int i=j+1; i<numPoints; ++i, ++ij)
+	  for(int k=0; k<numVarsr; k++)
+	    Z(k,ij)=-std::fabs(XR(k,i)-XR(k,j));
+  } else if(corrFunc==POW_EXP_CORR_FUNC) {
+    // ****************************************************************
+    // The Powered Exponential Correlation Function
+    // uses Z(k,ij) = -(|XR(k,i)-XR(k,j)|^powExpCorrFuncPow)
+    // where 1.0<powExpCorrFuncPow<2.0
+    // It can NOT be used for Gradient Enhanced Kriging (GEK)
+    // ****************************************************************
+    if(buildDerOrder>0) {
+      std::cerr << "the powered exponential correlation function is not a valid choice for Gradient Enhanced Kriging\n";
+      assert(!((corrFunc==POW_EXP_CORR_FUNC)&&(buildDerOrder>0)));
+    }
+    for(int j=0; j<numPoints-1; ++j)
+      for(int i=j+1; i<numPoints; ++i, ++ij)
+	for(int k=0; k<numVarsr; k++)
+	  Z(k,ij)=-std::pow(std::fabs(XR(k,i)-XR(k,j)),powExpCorrFuncPow);
+  } else{
+    std::cerr << "unknown Correlation Function in MtxDbl& KrigingModel::gen_Z_matrix()\n";
+    assert(false);
+  }
+  return Z;
+}
+
+void KrigingModel::reorderCopyRtoRChol() {
+  numRowsR=numEqnAvail;
+  RChol.newSize(numRowsR,numRowsR);
+
+  if(buildDerOrder==0) {
+    //Kriging
+    for(int jpt=0; jpt<numPoints; ++jpt) {
+      int jsrc=iPtsKeep(jpt,0);
+      for(int ipt=0; ipt<numPoints; ++ipt)
+	RChol(ipt,jpt)=R(iPtsKeep(ipt,0),jsrc);
+    }
+  } else if(buildDerOrder==1) {
+    //Gradient Enhanced Kriging, R is blocked into (1+numVarsr) by (1+numVarsr)
+    //submatrices.  Each submatrix has numPoints by numPoints elements, i.e.
+    //the same size as the Kriging R matrix, in fact the upper-left-most
+    //submatrix is the Kriging R matrix, we need to reorder this so that
+    //"Whole Points" (a function value immediately followed by its gradient)
+    //are listed in the order given in iPtsKeep
+
+    int i, j;
+    for(int jpt=0, j=0; jpt<numPoints; ++jpt)
+      for(int jder=-1; jder<numVarsr; ++jder, ++j) {
+	int jsrc=iPtsKeep(jpt,0)+(jder+1)*numPoints;
+	for(int ipt=0, i=0; ipt<numPoints; ++ipt)
+	  for(int ider=-1; ider<numVarsr; ++ider, ++i)
+	    RChol(i,j)=R(iPtsKeep(ipt,0)+(ider+1)*numPoints,jsrc);
+      }
+  } else {
+    std::cerr << "buildDerOrder=" << buildDerOrder
+	      << " in void KrigingModel::reorderCopyRtoRChol(); "
+	      << "for Kriging buildDerOrder must be 0; "
+	      << "for Gradient Enhanced Kriging buildDerOrder must be 1; "
+	      << "Higher order derivative enhanced Kriging "
+	      << "(e.g Hessian Enhanced Kriging) has not been implemented"
+	      << std::endl;
+    assert(false);
+  }
+  return;
+}
+
+void KrigingModel::nuggetSelectingCholR(){
+  if(buildDerOrder==0)
+    numExtraDerKeep=0;
+  else if(buildDerOrder==1)
+    numExtraDerKeep=numVarsr; //the last point will have all of the gradient
+  else{
+    std::cerr << "buildDerOrder=" << buildDerOrder
+	      << " in void KrigingModel::nuggetSelectingCholR(); "
+	      << "for Kriging buildDerOrder must be 0; "
+	      << "for Gradient Enhanced Kriging buildDerOrder must be 1; "
+	      << "Higher order derivative enhanced Kriging "
+	      << "(e.g Hessian Enhanced Kriging) has not been implemented"
+	      << std::endl;
+    assert(false);
+  }
+  numWholePointsKeep=numPointsKeep=numPoints;
+
+  double min_allowed_rcond=1.0/maxCondNum;
+  int ld_RChol=RChol.getNRowsAct();
+  rcondDblWork.newSize(3*ld_RChol,1);
+  rcondIntWork.newSize(ld_RChol,1);
+  scaleRChol.newSize(numEqnAvail,1); //scaling/equilibrating is only
+  //necessary if GEK is used (because Kriging already has all ones on
+  //the diagonal of R; GEK doesn't) but the generic Cholesky
+  //factorization won't know in advance whether it's needed or not
+  //you can calculate rcond essentially "for free" if you do it at the
+  //same time as the Cholesky factorization
+  int chol_info;
+
+  //point order is the default point order
+  for(int ipt=0; ipt<numPointsKeep; ++ipt)
+    iPtsKeep(ipt,0)=ipt;
+  if(ifAssumeRcondZero==true)
+    rcondR=0.0;
+  else {
+    //but if GEK is used I still need to reorder from derivative submatrix
+    //blocks to whole point order
+    reorderCopyRtoRChol();
+
+    //See the end of the KrigingModel constructor for why Y and Gtran are
+    //what we already need them to be.
+    //the maximumAllowedPolyOrder given the number of Points is already
+    //selected, and Gtran is already what we need it to be
+
+    nug=0.0;
+    Chol_fact_workspace(RChol,scaleRChol,rcondDblWork,rcondIntWork,
+			chol_info,rcondR);
+  }
+
+  //this rcondR is for the equilibrated R/RChol (so pretend it has all
+  //ones on the diagonal)
+  if(rcondR<=min_allowed_rcond) {
+    double dbl_num_eqn=static_cast<double>(numEqnAvail);
+    double sqrt_num_eqn=std::sqrt(dbl_num_eqn);
+    min_allowed_rcond*=sqrt_num_eqn; //one norm is within a factor of N^0.5
+    //of 2 norm
+    rcondR/=sqrt_num_eqn; //one norm is within a factor of N^0.5 of 2 norm
+    double min_eig_worst=(rcondR*dbl_num_eqn)/(1.0+(dbl_num_eqn-1.0)*rcondR);
+    double max_eig_worst=dbl_num_eqn-(dbl_num_eqn-1.0)*min_eig_worst;
+    nug=(min_allowed_rcond*max_eig_worst-min_eig_worst)/
+      (1.0-min_allowed_rcond);
+    //this nugget will make the worst case scenario meet (with an ==)
+    //the maxCondNum constraint, I (KRD) don't expect this to
+    //ever == fail because I don't expect rcond to be *N^-0.5 without
+    //nugget and be *N^0.5 with nugget while the maximum eigen value
+    //of R (without nugget) is N-(N-1)*min_eigval (that comes from
+    //assuming all eigenvalues except the largest are the smallest
+    //possible for the given rcond) note that rcond is the LAPACK
+    //ESTIMATE of the 1 norm condition number so there are no 100%
+    //guarantees.
+    apply_nugget_build(); //multiply the diagonal elements by (1.0+nug)
+    reorderCopyRtoRChol();
+
+    Chol_fact_workspace(RChol,scaleRChol,rcondDblWork,rcondIntWork,
+			chol_info,rcondR);
+  }
+  return;
+}
+
+
+/* use Pivoted Cholesky to efficiently select an optimal subset
+   of available build points from which to construct the Gaussian
+   Process.  Here "optimal" means that, given the current set of
+   assumed correlation parameters, this subset maximizes the
+   amount of unique information content in R, note that this is
+   equivalent to a "best spaced" (for the chosen correlation
+   function and its parameters) set of points and the output at
+   those points is not considered.  Thus if you have 2 points
+   that are very close together but on opposite sides of a
+   discontinuity it is highly likely that at least one of them
+   will get discarded */
+void KrigingModel::equationSelectingCholR(){
+  if(!((buildDerOrder==0)||(buildDerOrder==1))) {
+    std::cerr << "buildDerOrder=" << buildDerOrder
+	      << " in void KrigingModel::equationSelectingCholR().  "
+	      << "For Kriging buildDerOrder must equal 0.  "
+	      << "For Gradient Enhanced Kriging (GEK) buildDerOrder "
+	      << "must equal 1.  Higher order derivative enhanced "
+	      << "Kriging (e.g. Hessian Enhanced Kriging) has not "
+	      << "been implemented." << std::endl;
+    assert(false);
+  }
+
+  //polyOrder=polyOrderRequested;
+  nTrend=numTrend(polyOrderRequested,0);
+  Rinv_Gtran.newSize(numEqnAvail,nTrend);
+
+
+  //printf("Entered equationSelectingCholR()\n");
+  double min_allowed_rcond=1.0/maxCondNum;
+  //printf("min_allowed_rcond=%g\n",min_allowed_rcond);
+  //exit(0);
+  //double min_allowed_pivot_est_rcond=256.0/maxCondNum;
+
+  int ld_RChol=RChol.getNRowsAct();
+  //printf("ld_RChol=%d\n",ld_RChol);
+  int chol_info;
+  RChol.newSize(numPoints,numPoints);
+  scaleRChol.newSize(numEqnAvail,3); //maximum space needed
+  rcondDblWork.newSize(3*ld_RChol,1);
+  rcondIntWork.newSize(ld_RChol,1);
+  ld_RChol=RChol.getNRowsAct();
+
+  iPtsKeep.newSize(numPoints,1);
+  //assign the default order to points
+  for(int ipt=0; ipt<numPoints; ++ipt)
+    iPtsKeep(ipt,0)=ipt;
+
+  if(buildDerOrder==0) {
+    //We're using regular Kriging not GEK and for large matrices
+    //the pivoted Cholesky algorithm is nowhere close to as fast
+    //as the highly optimized level 3 LAPACK Cholesky so to make
+    //this run faster on average we're going to attempt to use
+    //the LAPACK cholesky take a look at the rcondR and then only
+    //do Pivoted Cholesky if we actually need to
+    RChol.copy(R);
+
+    //no scaling is necessary since have all ones on the diagonal
+    //of the Kriging R but the generic equilibrated Cholesky
+    //factorization function doesn't know that in advance
+    Chol_fact_workspace(RChol,scaleRChol,rcondDblWork,rcondIntWork,
+			chol_info,rcondR);
+    if(min_allowed_rcond<rcondR) {
+      numRowsR=numWholePointsKeep=numPointsKeep=numPoints;
+
+      Y.copy(Yall);
+
+      nTrend=numTrend(polyOrderRequested,0);
+      Gtran.newSize(numPoints,nTrend);
+      for(int itrend=0; itrend<nTrend; ++itrend)
+	for(int ipt=0; ipt<numPoints; ++ipt)
+	  Gtran(ipt,itrend)=Gall(itrend,ipt);
+
+      return;
+    }
+  }
+
+  // *******************************************************************
+  // in this section I need to get an optimally reordered Cholesky
+  // factorization of the Kriging or GEK R matrix and I need to compute
+  // the one norm for all sizes of that optimally reordered R matrix
+  // *******************************************************************
+  // We got here in one of two ways
+  //
+  // 1) We're doing Kriging and we actually need to do Pivoted Cholesky
+  //
+  // 2) We're doing Gradient Enhanced Kriging, and reordering "whole
+  //    points" (function value immediately followed by its derivatives)
+  //    according to the order of the Pivoted Cholesky on the Kriging R
+  //    works a lot better and is a lot faster than doing Pivoted
+  //    Cholesky on the GEK R.  In fact because the GEK R is so much
+  //    larger than the Kriging R, the cost of doing pivoted Cholesky on
+  //    the Kriging R will be insignificant compared to the cost of
+  //    doing LAPACK Cholesky on the GEK R so I'm just going to go ahead
+  //    and reorder the GEK R whether I need to or not (which I don't
+  //    know at this point anyway) rather than risk having to do the
+  //    LAPACK Cholesky twice
+
+  //if the user specifies an anchor point it must be the first point to
+  //prevent it from being pivoted away
+  if(ifHaveAnchorPoint&&(iAnchorPoint!=0)) {
+    iPtsKeep(iAnchorPoint,0)=0;
+    iPtsKeep(0,0)=iAnchorPoint;
+  }
+  else iAnchorPoint=0;
+
+  for(int jpt=0; jpt<numPoints; ++jpt) {
+    int jsrc=iPtsKeep(jpt,0);
+    for(int ipt=0; ipt<numPoints; ++ipt)
+      RChol(ipt,jpt)=R(iPtsKeep(ipt,0),jsrc);
+  }
+
+  int info=0;
+  char uplo='B'; //'B' means we have both halves of R in RChol so the
+  //fortran doesn't have to copy one half to the other, having both
+  //halves makes the memory access faster (can always go down columns)
+  numPointsKeep=numPoints;
+  PIVOTCHOL_F77(&uplo, &numPoints, RChol.ptr(0,0), &ld_RChol,
+    		iPtsKeep.ptr(0,0), &numPointsKeep, &min_allowed_rcond,
+		&info);
+
+  //for(int ipt=0; ipt<numPoints; ++ipt)
+  //printf("F77 iPtsKeep(%d)=%d\n",ipt,iPtsKeep(ipt,0));
+  //printf("\n");
+
+  //printf("*********************************\n");
+
+  if(ifHaveAnchorPoint&&(iAnchorPoint!=0)) {
+    iPtsKeep(0,0)=iAnchorPoint;
+    for(int ipt=1; ipt<numPoints; ++ipt) {
+      iPtsKeep(ipt,0)-=1; //Fortran indices start at 1 not zero so
+      //we have to convert to C++ indices which start from 0
+      if(iPtsKeep(ipt,0)==iAnchorPoint)
+	iPtsKeep(ipt,0)=0;
+    }
+  }
+  else {
+    for(int ipt=0; ipt<numPoints; ++ipt) {
+      iPtsKeep(ipt,0)-=1; //Fortran indices start at 1 not zero so
+      //we have to convert to C++ indices which start from 0
+      //printf("iPtsKeep(%2d,0)=%d\n",ipt,iPtsKeep(ipt,0));
+    }
+    //printf("\n");
+  }
+
+  //if I feed LAPACK a one norm of R and a Cholesky factorization of
+  //R it will give me back an rcond for O(N^2) ops which is practically
+  //free compared to the O(N^3) ops that the Cholesky factorization
+  //costs, the wonderful thing is if I just drop equations off the end
+  //of a pivoted Cholesky factorization I can get the rcondR for any
+  //number of rows/columns of the pivoted R matrix, but to make this
+  //efficient I need to get the one norms of R cheaply (easily doable,
+    //that's what happens in the next "if Kriging else if GEK" statement)
+  //and I'll use bisection to find the last equation I can retain
+  int iprev_lapack_rcondR;
+  int icurr_lapack_rcondR=numEqnAvail-1;
+  int num_eqn_keep=numEqnAvail;
+  oneNormR.newSize(numEqnAvail,1);
+  sumAbsColR.newSize(numEqnAvail,1);
+  if(buildDerOrder==0) {
+    //Kriging we need to compute the one-norms for the reordered Kriging
+    //R matrix
+    //the one norm is the largest of the sums of the absolute value of
+    //any of the columns, of course how many rows there are affects what
+    //the sums of absolute value of the columns are and how many columns
+    //there are affects which is the largest but we can build this up in
+    //such a way as to reuse the information from smaller numbers of
+    //rows/columns for larger numbers of rows/columns
+
+    int jsrc=iPtsKeep(0,0);
+    for(int ipt=0; ipt<numPoints; ++ipt)
+      sumAbsColR(ipt,0)=std::fabs(R(iPtsKeep(ipt,0),jsrc));
+    oneNormR(0,0)=sumAbsColR(0,0); //this is the one norm for the 1 by
+    //1 reordered R matrix
+
+    double tempdouble;
+    for(int jpt=1; jpt<numPoints; ++jpt) {
+      jsrc=iPtsKeep(jpt,0);
+      for(int ipt=0; ipt<numPoints; ++ipt)
+	sumAbsColR(ipt,0)+=std::fabs(R(iPtsKeep(ipt,0),jsrc));
+      tempdouble=sumAbsColR(0,0);
+      for(int ipt=1; ipt<=jpt; ++ipt)
+	if(tempdouble<sumAbsColR(ipt,0))
+	  tempdouble=sumAbsColR(ipt,0);
+      oneNormR(jpt,0)=tempdouble; //this is the one norm for the
+      //jpt by jpt reordered R matrix
+    }
+    uplo='L'; //get it into the same state as GEK
+    iprev_lapack_rcondR=0; //a 1 by 1 matrix has a condition number of 1
+
+  } else if(buildDerOrder==1){
+    //Gradient Enhanced Kriging
+    //it works better (and is a lot faster) if we reorder whole points
+    //according to the Pivoted Cholesky ON THE KRIGING R order in iPtsKeep
+    //so we'll calculate the one norm for all sizes of the reordered
+    //GEK R and then Cholesky factorize the GEK R
+    reorderCopyRtoRChol();
+    /*
+    printf("R=\n");
+    for(int i=0; i<numEqnAvail; ++i) {
+      for(int j=0; j<numEqnAvail; ++j)
+	printf("%14.8g ",RChol(i,j));
+      printf("\n");
+    }
+    printf("\n");
+    */
+    scaleRChol.newSize(numEqnAvail,2);
+    for(int i=0; i<numEqnAvail; ++i) {
+      scaleRChol(i,1)=std::sqrt(RChol(i,i));
+      scaleRChol(i,0)=1.0/scaleRChol(i,1);
+    }
+
+    //equilibrate RChol
+    for(int j=0; j<numEqnAvail; ++j) {
+      for(int i=0; i<numEqnAvail; ++i)
+	RChol(i,j)*=scaleRChol(i,0)*scaleRChol(j,0);
+      RChol(j,j)=1.0; //there is zero correlation between an individual
+      //point's function value and its derivatives so we know how to fix
+      //round-off error so just do it
+    }
+    /*
+    printf("RE=\n");
+    for(int i=0; i<numEqnAvail; ++i) {
+      for(int j=0; j<numEqnAvail; ++j)
+	printf("%14.8g ",RChol(i,j));
+      printf("\n");
+    }
+    printf("\n");
+    */
+    //the one norm number is the largest of the sums of the absolute
+    //value of any of the columns of the matrix, of course how many rows
+    //there are affects what the sums of absolute value of the columns
+    //are and how many columns there are affects which is the largest
+    //but we can build this up in such a way as to reuse the information
+    //from smaller numbers of rows/columns for larger numbers of rows/
+    //columns
+
+    //right now RChol holds the reordered R matrix
+    for(int i=0; i<numEqnAvail; ++i)
+      sumAbsColR(i,0)=std::fabs(RChol(i,0));
+    oneNormR(0,0)=sumAbsColR(0,0); //this is the one norm for the 1 by
+    //1 reordered R matrix
+
+    double tempdouble;
+    for(int j=1; j<numEqnAvail; ++j) {
+      for(int i=0; i<numEqnAvail; ++i)
+	sumAbsColR(i,0)+=std::fabs(RChol(i,j));
+      tempdouble=sumAbsColR(0,0);
+      for(int i=1; i<=j; ++i)
+	if(tempdouble<sumAbsColR(i,0))
+	  tempdouble=sumAbsColR(i,0);
+      oneNormR(j,0)=tempdouble;  //this is the one norm for the
+      //j by j reordered R matrix
+    }
+
+    //do the (highly optimized) LAPACK Cholesky Decomposition of all
+    //the equations (but sorted into the point order determined by
+    //the pivoting cholesky above)
+    uplo='L';
+    DPOTRF_F77(&uplo,&numEqnAvail,RChol.ptr(0,0),&ld_RChol,&info);
+
+    //Kriging already has the rcondR so to get GEK into an equivalent
+    //state we will feed LAPACK the one norm of the full GEK R (after
+    //the reordering and equilibration) and it will give me back GEK's
+    //rcondR
+    DPOCON_F77(&uplo,&numEqnAvail,RChol.ptr(0,0),&ld_RChol,
+	       oneNormR.ptr(icurr_lapack_rcondR,0),
+	       &rcondR,rcondDblWork.ptr(0,0),rcondIntWork.ptr(0,0),
+	       &info);
+
+    //printf("rcond(RE)=%g icurr_lapack_rcondR=%d\n",rcondR,icurr_lapack_rcondR);
+
+    //the first derivatives of the correlation at a point are uncorrelated
+    //with the correlation function at the same point, i.e. the (1+numVarsr)
+    //by (1+numVarsr) correlation matrix has a condition number of 1
+    iprev_lapack_rcondR=numVarsr; //no 1+ because C++ indexes start at zero
+  }
+
+  // *****************************************************************
+  // in this section we will efficiently determine the maximum number
+  // of equations that we can retain by doing a bisection search using
+  // O(log2(N)) calls of LAPACK's rcond estimate (each of which cost
+  // only O(n^2) ops where n is the number of eqns in the current
+  // subset
+  // *****************************************************************
+
+  lapackRcondR.newSize(numEqnAvail,1);
+  lapackRcondR(iprev_lapack_rcondR,0)=1.0; //since the condition number
+  //is one at iprev_lapack_rcondR we know we can keep at least this many
+  //equations
+
+  lapackRcondR(icurr_lapack_rcondR,0)=rcondR; //the maximum number
+  //of equations we can keep is icurr_lapack_rcondR=numEqnAvail-1
+  //and we know the rcondR for that many equations
+
+  //note num_eqn_keep is now numEqnAvail
+  int inext_lapack_rcondR=icurr_lapack_rcondR; //the last available eqn
+  if((rcondR<=min_allowed_rcond)&&
+     (inext_lapack_rcondR-iprev_lapack_rcondR==1)) {
+    //at this point the previous lapack rcondR==1.0
+    rcondR=1.0;
+    inext_lapack_rcondR=iprev_lapack_rcondR;
+    //printf("if1\n");
+  }
+
+  //do the bisection search if necessary, at most ceil(log2()) more
+  //calls to the LAPACK rcond function
+  int rcond_iter=0;
+  int max_rcond_iter=
+    std::ceil(std::log(static_cast<double>
+		       (inext_lapack_rcondR-iprev_lapack_rcondR))
+	      /std::log(2.0));
+  while((lapackRcondR(inext_lapack_rcondR,0)<=min_allowed_rcond)&&
+        (inext_lapack_rcondR>iprev_lapack_rcondR)) {
+    //printf("inWhile\n");
+    ++rcond_iter;
+    icurr_lapack_rcondR=(iprev_lapack_rcondR+inext_lapack_rcondR)/2;
+    num_eqn_keep=icurr_lapack_rcondR+1;
+
+    //the LAPACK rcond function
+    DPOCON_F77(&uplo,&num_eqn_keep,RChol.ptr(0,0),&ld_RChol,
+	       oneNormR.ptr(icurr_lapack_rcondR,0),
+	       &rcondR,rcondDblWork.ptr(0,0),rcondIntWork.ptr(0,0),
+	       &info);
+    lapackRcondR(icurr_lapack_rcondR,0)=rcondR;
+    //printf("rcond_iter=%d icurr_lapack_rcondR=%d rcondR=%g\n",
+    //rcond_iter,icurr_lapack_rcondR,rcondR);
+
+    if(rcondR<min_allowed_rcond)
+      inext_lapack_rcondR=icurr_lapack_rcondR;
+    else if(min_allowed_rcond<rcondR)
+      iprev_lapack_rcondR=icurr_lapack_rcondR;
+    else if(min_allowed_rcond==rcondR) {
+      //num_eqn_keep=icurr_lapack_rcondR+1;
+      break;
+    }
+    if((inext_lapack_rcondR-iprev_lapack_rcondR==1)||
+       (max_rcond_iter<rcond_iter)) {
+      num_eqn_keep=iprev_lapack_rcondR+1;
+      rcondR=lapackRcondR(iprev_lapack_rcondR,0);
+      break;
+    }
+  }
+  //printf(" pivoted_rcondR=%g numRowsR=%d\n",rcondR,num_eqn_keep);
+
+  numRowsR=num_eqn_keep; //this is the maximum number of equations that
+  //we can keep
+
+  // ***************************************************************
+  // in this section we downsize the arrays being retained and keep
+  // only the optimal subset, in our working copies
+  // ***************************************************************
+
+  RChol.resize(num_eqn_keep,num_eqn_keep); //resize() instead of newSize()
+  //because we want to keep the current contents in the same 2D
+  //order
+  /*
+  if(num_eqn_keep>=10) {
+    printf("RChol(1:10,1:10)=\n");
+    for(int i=0; i<10; ++i) {
+      for(int j=0; j<10; ++j)
+	printf("%12.6g ",RChol(i,j));
+      printf("\n");
+    }
+    printf("\n\n");
+  }
+  */
+
+  //polyOrder=polyOrderRequested; //redundant but for clarity
+
+  //the following while loop was commented out when adaptive selection of
+  //the trend basis functions via pivoted Cholesky factorization of G*R^-1*G^T
+  //was implemented
+  //while((numRowsR<=numTrend(polyOrder,0))&&(polyOrder>0))
+  //--polyOrder;
+
+  //nTrend=numTrend(polyOrder,0); //commented out because we now select a subset of Poly using a Pivoted Cholesky factorization of G*R^-1*G^T which happens when trendSelectingPivotedCholesky is called by masterObjectiveAndConstraints() (we no longer select SOLELY on polynomial order and number of points)
+
+  nTrend=numTrend(polyOrderRequested,0);
+
+  //printf("num_eqn_keep=%d numRowsR=%d polyOrder=%d nTrend=%d rcondR=%g lapackRcondR(num_eqn_keep-1,0)=%g\n",num_eqn_keep,numRowsR,polyOrder,nTrend,rcondR,lapackRcondR(num_eqn_keep-1,0));
+
+  //we need to downsize Gtran now but we only need to downsize Poly at
+  //the end of create()
+  Gtran.newSize(num_eqn_keep,nTrend); //newSize() because we don't care
+ //about the current contents of Gtran
+
+  Y.newSize(num_eqn_keep,1); //newSize() because we don't care about
+  //the current contents of Y
+
+  if(buildDerOrder==0) {
+    // keep only the useful parts for Kriging
+    numWholePointsKeep=numPointsKeep=num_eqn_keep;
+    numExtraDerKeep=0;
+
+    /*
+    if(numPointsKeep>10) {
+
+      MtxDbl RCholDEBUG(numPointsKeep,numPointsKeep);
+      for(int jpt=0; jpt<numPointsKeep; ++jpt) {
+	int jsrc=iPtsKeep(jpt,0);
+	for(int ipt=0; ipt<numPointsKeep; ++ipt)
+	  RCholDEBUG(ipt,jpt)=R(iPtsKeep(ipt,0),jsrc);
+      }
+      double rcondRDEBUG;
+      int chol_info_debug;
+
+      printf("Rreorder(1:10,1:10)=\n");
+      for(int ipt=0; ipt<10; ++ipt) {
+	for(int jpt=0; jpt<10; ++jpt)
+	  printf("%12.6g ",RCholDEBUG(ipt,jpt));
+	printf("\n");
+      }
+      printf("\n\n");
+
+      Chol_fact_workspace(RCholDEBUG,scaleRChol,rcondDblWork,rcondIntWork,
+			  chol_info_debug,rcondRDEBUG);
+
+      printf("RChol(1:10,1:10)=\n");
+      for(int ipt=0; ipt<10; ++ipt) {
+	for(int jpt=0; jpt<10; ++jpt)
+	  printf("%12.6g ",RChol(ipt,jpt));
+	printf("\n");
+      }
+      printf("\n\n");
+
+      printf("RCholDEBUG(1:10,1:10)=\n");
+      for(int ipt=0; ipt<10; ++ipt) {
+	for(int jpt=0; jpt<10; ++jpt)
+	  printf("%12.6g ",RCholDEBUG(ipt,jpt));
+	printf("\n");
+      }
+      printf("\n\n");
+
+      printf("[RChol-RCholDEBUG](1:10,1:10)=\n");
+      for(int ipt=0; ipt<10; ++ipt) {
+	for(int jpt=0; jpt<10; ++jpt)
+	  printf("%12.6g ",RChol(ipt,jpt)-RCholDEBUG(ipt,jpt));
+	printf("\n");
+      }
+      printf("\n\n");
+
+      printf("rcondR=%g rcondRDEBUG=%g numPointsKeep=%d\nErrorRChol=\n",
+	     rcondR,rcondRDEBUG,numPointsKeep);
+
+    }
+    */
+
+    //keep the useful part of Y
+    for(int ipt=0; ipt<numPointsKeep; ++ipt)
+      Y(ipt,0)=Yall(iPtsKeep(ipt,0),0);
+
+    //keep the useful part of G (actually G^T)
+    for(int itrend=0; itrend<nTrend; ++itrend)
+      for(int ipt=0; ipt<numPointsKeep; ++ipt)
+	Gtran(ipt,itrend)=Gall(itrend,iPtsKeep(ipt,0));
+
+    /*
+    for(int ipt=0; ipt<numPointsKeep; ++ipt) {
+      printf("Gtran(%3d,:)=[%12.6g",ipt,Gtran(ipt,0));
+      for(int itrend=1; itrend<nTrend; ++itrend)
+	printf(", %12.6g",Gtran(ipt,itrend));
+      printf("] XR(:,%3d)=[%12.6g",ipt,XR(0,iPtsKeep(ipt,0)));
+      for(int k=1; k<numVarsr; ++k)
+	printf(", %12.6g",XR(k,iPtsKeep(ipt,0)));
+      printf("]^T Y(%3d,0)=%12.6g\n",ipt,Y(ipt,0));
+    }
+    printf("\n");
+    */
+
+
+  } else if(buildDerOrder==1) {
+    // keep only the useful parts for Gradient Enhanced Kriging
+
+    //integer division automatically rounds down
+    numWholePointsKeep=num_eqn_keep/(1+numVarsr);
+
+    //we also need to round up
+    numPointsKeep=
+      static_cast<int>(std::ceil(static_cast<double>(num_eqn_keep)/
+				 static_cast<double>(1+numVarsr)));
+
+    if(numPointsKeep==numWholePointsKeep) {
+      //perhaps a better name would be numLastDerKeep... this is the number
+      //of derivatives retained for the last point.
+      //NOTE(review): the statement below uses "==" (a no-op comparison)
+      //where the assignment "numExtraDerKeep=numVarsr;" appears to be
+      //intended (compare the "=" used in the else branch); as written
+      //numExtraDerKeep is never set on this branch -- confirm and fix
+      numExtraDerKeep==numVarsr;
+    } else
+      numExtraDerKeep=num_eqn_keep-(1+numWholePointsKeep*(1+numVarsr));
+
+    //we need to undo the equilibration of RChol, recall that scaleRChol
+    //is already in the pivoted Cholesky order
+    for(int j=0; j<num_eqn_keep; ++j)
+      for(int i=j; i<num_eqn_keep; ++i)
+	RChol(i,j)*=scaleRChol(i,1); //note that this assumes that the
+    //nkm::SurfMat class uses the lower triangular part of of RChol
+    //otherwise (if this function uses the lower triangular part but
+    //surfmat uses the upper triangular part) you'd need
+    //RChol(j,i)=RChol(i,j)*scaleRChol(i,j) but you could do a
+    //RChol(j,i)=RChol(i,j)*=ScaleRChol(i,1); just to be safe
+
+    //keep the useful part of G (actually G^T)
+    for(int itrend=0; itrend<nTrend; ++itrend) {
+      int i, ipt;
+      for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt) {
+	int isrc=iPtsKeep(ipt,0)*(1+numVarsr);
+	for(int k=0; k<1+numVarsr; ++k, ++isrc, ++i)
+	  Gtran(i,itrend)=Gall(itrend,isrc);
+      }
+      if(numPointsKeep>numWholePointsKeep) {
+#ifdef __KRIG_ERR_CHECK__
+	assert((ipt==numWholePointsKeep)&&
+	       (i==(numWholePointsKeep*(1+numVarsr)))&&
+	       (numWholePointsKeep+1==numPointsKeep));
+#endif
+	int isrc=iPtsKeep(ipt,0)*(1+numVarsr);
+	Gtran(i,itrend)=Gall(itrend,isrc);
+	++i;
+	++isrc;
+	for(int k=0; k<numExtraDerKeep; ++k, ++isrc, ++i)
+	  Gtran(i,itrend)=Gall(itrend,isrc);
+      }
+    }
+
+    //keep the useful part of Y, also we need to undo the
+    //equilibration of RChol
+    { //{ to impose scope
+      int i, ipt;
+      for(ipt=0, i=0; ipt<numWholePointsKeep; ++ipt) {
+	int isrc=iPtsKeep(ipt,0)*(1+numVarsr);
+	for(int k=0; k<1+numVarsr; ++k, ++isrc, ++i)
+	  Y(i,0)=Yall(isrc,0);
+      }
+      if(numPointsKeep>numWholePointsKeep) {
+#ifdef __KRIG_ERR_CHECK__
+	assert((ipt==numWholePointsKeep)&&
+	       (i==(numWholePointsKeep*(1+numVarsr)))&&
+	       (numWholePointsKeep+1==numPointsKeep));
+#endif
+	int isrc=iPtsKeep(ipt,0)*(1+numVarsr);
+	Y(i,0)=Yall(isrc,0);
+	++i;
+	++isrc;
+	for(int k=0; k<numExtraDerKeep; ++k, ++isrc, ++i)
+	  Y(i,0)=Yall(isrc,0);
+      }
+    } //{} to impose scope
+  } //else if(buildDerOrder==1) {
+
+  iPtsKeep.resize(numPointsKeep,1);
+
+  return;
+}
+
+/** Select an optimal, well-conditioned subset of the trend basis
+    functions by a pivoted Cholesky factorization of G*R^-1*G^T.
+
+    Precondition: the member variable G_Rinv_Gtran must be filled with
+    G*R^-1*G^T prior to calling
+    void KrigingModel::trendSelectingPivotedCholesky()
+
+    Works entirely through member state (no arguments, no return value);
+    on exit
+      * nTrend     = the number of trend basis functions retained
+      * iTrendKeep = the retained basis functions' indices in logical
+                     (sorted ascending) order
+      * G_Rinv_Gtran_Chol = the (unequilibrated) Cholesky factorization
+                     of the retained subset of G*R^-1*G^T
+      * Gtran and Rinv_Gtran are compacted down to the retained columns
+
+    NOTE(review): on the pivoted-Cholesky path this routine overwrites
+    G_Rinv_Gtran in place with its equilibrated version -- confirm no
+    later caller needs the unequilibrated matrix. */
+void KrigingModel::trendSelectingPivotedCholesky(){
+
+  //nTrend=numTrend(polyOrderRequested,0);
+  iTrendKeep.newSize(nTrend,1);
+  double min_allowed_rcond=1.0/maxCondNum; //smallest reciprocal
+  //condition number we are willing to accept
+  int num_trend_want=numTrend(polyOrderRequested,0);
+
+  //heuristic upper bound on how many trend basis functions we will let
+  //ourselves keep: no more than numRowsR-1-2*numVarsr, no more than
+  //half the number of retained equations, no more than the user asked
+  //for, but always at least one (the constant basis function)
+  int max_trend_to_keep=numRowsR-1-2*numVarsr;
+  if(numRowsR/2<max_trend_to_keep)
+    max_trend_to_keep=numRowsR/2;
+  if(num_trend_want<max_trend_to_keep)
+    max_trend_to_keep=num_trend_want;
+  if(max_trend_to_keep<1)
+    max_trend_to_keep=1;
+
+  //std::cout << "numRowrR=" << numRowsR << " nTrend=" << nTrend << " max_trend_to_keep=" << max_trend_to_keep << std::endl;
+
+  if(nTrend<=max_trend_to_keep) {
+    //do a (cheap, highly optimized) LAPACK cholesky first; if the full
+    //requested set of trend basis functions is well enough conditioned
+    //we can keep them all and skip the pivoted Cholesky entirely
+    G_Rinv_Gtran_Chol.copy(G_Rinv_Gtran);
+    int chol_info;
+    Chol_fact_workspace(G_Rinv_Gtran_Chol,G_Rinv_Gtran_Chol_Scale,
+			G_Rinv_Gtran_Chol_DblWork,G_Rinv_Gtran_Chol_IntWork,
+			chol_info,rcond_G_Rinv_Gtran);
+    if(min_allowed_rcond<rcond_G_Rinv_Gtran) {
+      //the LAPACK Cholesky was not ill-conditioned with the desired
+      //full set of trend basis functions so we'll use them all
+      for(int itrend=0; itrend<nTrend; ++itrend)
+	iTrendKeep(itrend,0)=itrend;
+      return;
+    }
+  }
+
+  // *****************************************************************
+  // if we got here we need to do a Pivoted Cholesky factorization of
+  // G*R^-1*G^T to select an optimal subset of trend basis functions
+  // *****************************************************************
+
+  // we have a slight problem, basically the one trend function that
+  // is guaranteed to be selected is the one that has the largest
+  // diagonal element in G*R^-1*G^T, or if they are all the same it
+  // will be the first one.  we want to guarantee that the constant
+  // basis function (polynomial of order 0) is retained so we need to
+  // equilibrate G_Rinv_Gtran so that it has all ones on the diagonal
+  // and therefore that the constant trend basis function will be
+  // retained
+
+  // I realize this has R in the name but I don't want to allocate another
+  // variable; column 0 holds 1/sqrt(diag), column 1 holds sqrt(diag),
+  // and column 2 will later hold the retained entries of column 1 in
+  // their logical order (used to undo the equilibration)
+  scaleRChol.newSize(nTrend,3);
+  for(int itrend=0; itrend<nTrend; ++itrend) {
+    scaleRChol(itrend,1)=std::sqrt(G_Rinv_Gtran(itrend,itrend));
+    scaleRChol(itrend,0)=1.0/scaleRChol(itrend,1);
+  }
+
+
+  //equilibrate G_Rinv_Gtran in place so it has all ones on the diagonal
+  for(int jtrend=0; jtrend<nTrend; ++jtrend) {
+    double tempdouble=scaleRChol(jtrend,0);
+    for(int itrend=0; itrend<nTrend; ++itrend)
+      G_Rinv_Gtran(itrend,jtrend)*=scaleRChol(itrend,0)*tempdouble;
+    G_Rinv_Gtran(jtrend,jtrend)=1.0; //numerical round off error could
+    //kill our guarantee of keeping the constant trend basis function
+    //so we'll just assign the correct answer which is exactly 1.0
+  }
+
+  G_Rinv_Gtran_Chol.copy(G_Rinv_Gtran);
+
+  int ld_G_Rinv_Gtran_Chol=G_Rinv_Gtran_Chol.getNRowsAct();
+  int info=0;
+  char uplo='B'; //'B' means we have both halves of G*R^-1*G^T in
+  //G_Rinv_Gtran_Chol so the FORTRAN doesn't have to copy one half to the
+  //other, having both halves makes the memory access faster (can always
+  //go down columns)
+  int num_trend_keep=-max_trend_to_keep; //RANK<0 on input tells PIVOTCHOL_F77
+  //that we only want to keep the first abs(RANK) entries so it can stop early
+  PIVOTCHOL_F77(&uplo, &nTrend, G_Rinv_Gtran_Chol.ptr(0,0),
+		&ld_G_Rinv_Gtran_Chol, iTrendKeep.ptr(0,0), &num_trend_keep,
+		&min_allowed_rcond, &info);
+
+  nTrend=num_trend_keep; //this is the maximum number of trend functions
+  //we could keep, we might not be able to keep this many
+
+  //FORTRAN indices start at 1 not zero so we have to convert to C++ indices
+  //which start at zero
+  for(int itrend=0; itrend<nTrend; ++itrend)
+    iTrendKeep(itrend,0)-=1;
+
+  // ************************************************************
+  // in this section I need to calculate the one norm for subsets
+  // of the first jtrend (reordered) rows/columns of the
+  // equilibrated G*R^-1*G^T , for jtrend=1 to nTrend
+  // ************************************************************
+
+  //I realize that this says R but I don't want to allocate another variable
+  oneNormR.newSize(nTrend,1);
+  //I realize that this says R but I don't want to allocate another variable
+  sumAbsColR.newSize(nTrend,1);
+  int jsrc=iTrendKeep(0,0);
+  for(int itrend=0; itrend<nTrend; ++itrend)
+    sumAbsColR(itrend,0)=std::fabs(G_Rinv_Gtran(iTrendKeep(itrend,0),jsrc));
+  oneNormR(0,0)=sumAbsColR(0,0); //this is the one norm for the 1 by
+  //1 reordered G_Rinv_Gtran matrix
+
+  //build the one norms up incrementally, reusing the column sums of
+  //absolute values from the smaller subsets for the larger subsets
+  double tempdouble;
+  for(int jtrend=1; jtrend<nTrend; ++jtrend) {
+    jsrc=iTrendKeep(jtrend,0);
+    for(int itrend=0; itrend<nTrend; ++itrend)
+      sumAbsColR(itrend,0)+=std::fabs(G_Rinv_Gtran(iTrendKeep(itrend,0),jsrc));
+    tempdouble=sumAbsColR(0,0);
+    for(int itrend=1; itrend<=jtrend; ++itrend)
+      if(tempdouble<sumAbsColR(itrend,0))
+	tempdouble=sumAbsColR(itrend,0);
+    oneNormR(jtrend,0)=tempdouble; //this is the one norm for the
+    //jtrend by jtrend reordered G_Rinv_Gtran matrix
+  }
+
+  ld_G_Rinv_Gtran_Chol=G_Rinv_Gtran_Chol.getNRowsAct(); //this probably hasn't changed
+  //but better safe than sorry
+  rcondDblWork.newSize(3*ld_G_Rinv_Gtran_Chol,1);
+  rcondIntWork.newSize(ld_G_Rinv_Gtran_Chol,1);
+  int icurr_lapack_rcond=nTrend-1;
+  uplo='L';
+  DPOCON_F77(&uplo,&nTrend,G_Rinv_Gtran_Chol.ptr(0,0),&ld_G_Rinv_Gtran_Chol,
+	     oneNormR.ptr(icurr_lapack_rcond,0),&rcond_G_Rinv_Gtran,
+	     rcondDblWork.ptr(0,0),rcondIntWork.ptr(0,0),&info);
+
+  //need to do a bisection search for the last trend function that we can keep
+  lapackRcondR(icurr_lapack_rcond,0)=rcond_G_Rinv_Gtran; //the maximum number
+  //of trend basis functions we can keep is icurr_lapack_rcond=nTrend-1
+  //and we know the rcond for that many equations
+  int iprev_lapack_rcond=0;
+  lapackRcondR(iprev_lapack_rcond,0)=1.0; //the constant trend function by
+  //itself is guaranteed to have condition # = 1.0
+
+  //note num_trend_keep is now nTrend
+  int inext_lapack_rcond=icurr_lapack_rcond; //the last available basis
+  //function
+  if((rcond_G_Rinv_Gtran<=min_allowed_rcond)&&
+     (inext_lapack_rcond-iprev_lapack_rcond==1)) {
+    //at this point the previous lapack rcondR==1.0
+    rcond_G_Rinv_Gtran=1.0;
+    inext_lapack_rcond=iprev_lapack_rcond;
+    //printf("if1\n");
+  }
+
+  //do the bisection search if necessary, at most ceil(log2()) more
+  //calls to the LAPACK rcond function (each of which is only O(n^2) ops)
+  int rcond_iter=0;
+  int max_rcond_iter=
+    std::ceil(std::log(static_cast<double>
+		       (inext_lapack_rcond-iprev_lapack_rcond))
+	      /std::log(2.0));
+  while((lapackRcondR(inext_lapack_rcond,0)<=min_allowed_rcond)&&
+        (inext_lapack_rcond>iprev_lapack_rcond)) {
+    //printf("inWhile\n");
+    ++rcond_iter;
+    icurr_lapack_rcond=(iprev_lapack_rcond+inext_lapack_rcond)/2;
+    num_trend_keep=icurr_lapack_rcond+1;
+
+    //the LAPACK rcond function
+    DPOCON_F77(&uplo,&num_trend_keep,G_Rinv_Gtran_Chol.ptr(0,0),
+	       &ld_G_Rinv_Gtran_Chol,oneNormR.ptr(icurr_lapack_rcond,0),
+	       &rcond_G_Rinv_Gtran,rcondDblWork.ptr(0,0),
+	       rcondIntWork.ptr(0,0),&info);
+    lapackRcondR(icurr_lapack_rcond,0)=rcond_G_Rinv_Gtran;
+    //printf("rcond_iter=%d icurr_lapack_rcondR=%d rcondR=%g\n",
+    //rcond_iter,icurr_lapack_rcondR,rcondR);
+
+    if(rcond_G_Rinv_Gtran<min_allowed_rcond)
+      inext_lapack_rcond=icurr_lapack_rcond;
+    else if(min_allowed_rcond<rcond_G_Rinv_Gtran)
+      iprev_lapack_rcond=icurr_lapack_rcond;
+    else if(min_allowed_rcond==rcond_G_Rinv_Gtran) {
+      //num_trend_keep=icurr_lapack_rcond+1;
+      break;
+    }
+    //stop when the bisection bracket has collapsed to adjacent indices
+    //(or, as a safety net, if we somehow exceed the expected iteration
+    //count); keep the last size known to be acceptably conditioned
+    if((inext_lapack_rcond-iprev_lapack_rcond==1)||
+       (max_rcond_iter<rcond_iter)) {
+      num_trend_keep=iprev_lapack_rcond+1;
+      rcond_G_Rinv_Gtran=lapackRcondR(iprev_lapack_rcond,0);
+      break;
+    }
+  }
+  //printf(" pivoted_rcondR=%g numRowsR=%d\n",rcondR,num_eqn_keep);
+
+  nTrend=num_trend_keep; //this is the maximum number of trend basis functions
+  //that we can keep
+
+  iTrendKeep.resize(nTrend,1);
+  //we don't want the basis functions in their optimal order we want them
+  //in their logical order, minus the ones we couldn't keep
+  iTrendKeep.sortRows();
+
+  //we're going to copy the portion of the equilibrated G*R^-1*G^T matrix
+  //that we're keeping, in its logical order, into G_Rinv_Gtran_Chol and
+  //then do a LAPACK cholesky on it, and then undo the equilibration
+  G_Rinv_Gtran_Chol.newSize(nTrend,nTrend);
+  for(int jtrend=0; jtrend<nTrend; ++jtrend) {
+    int jsrc=iTrendKeep(jtrend,0);
+    scaleRChol(jtrend,2)=scaleRChol(jsrc,1); //retained sqrt(diag) entries
+    //in logical order, used below to undo the equilibration
+    for(int itrend=0; itrend<nTrend; ++itrend)
+      G_Rinv_Gtran_Chol(itrend,jtrend)=G_Rinv_Gtran(iTrendKeep(itrend,0),jsrc);
+  }
+
+  //do the LAPACK cholesky factorization
+  int chol_info;
+  Chol_fact_workspace(G_Rinv_Gtran_Chol,G_Rinv_Gtran_Chol_Scale,
+		      G_Rinv_Gtran_Chol_DblWork,G_Rinv_Gtran_Chol_IntWork,
+		      chol_info,rcond_G_Rinv_Gtran);
+
+  //we still need to undo the equilibration because we copied the
+  //equilibrated G*R^-1*G^T matrix into G_Rinv_Gtran_Chol before doing
+  //the cholesky factorization; only the lower triangle is rescaled --
+  //presumably only the lower triangular factor is used downstream,
+  //TODO confirm
+  for(int jtrend=0; jtrend<nTrend; ++jtrend)
+    for(int itrend=jtrend; itrend<nTrend; ++itrend)
+      G_Rinv_Gtran_Chol(itrend,jtrend)*=scaleRChol(itrend,2);
+
+
+
+  //compact the retained columns of Gtran and Rinv_Gtran down into the
+  //leading nTrend columns; iTrendKeep is sorted ascending so isrc is
+  //always >= itrend and copying left-to-right never overwrites a source
+  //column before it has been moved (columns already in place, where
+  //itrend==isrc, are skipped)
+  for(int itrend=1; itrend<nTrend; ++itrend) {
+    int isrc=iTrendKeep(itrend,0);
+    if(itrend<isrc)
+      for(int i=0; i<numRowsR; ++i) {
+	Gtran(i,itrend)=Gtran(i,isrc);
+	Rinv_Gtran(i,itrend)=Rinv_Gtran(i,isrc);
+      }
+  }
+  Gtran.resize(numRowsR,nTrend);
+  Rinv_Gtran.resize(numRowsR,nTrend);
+
+  //and we're done with the pivoted cholesky, but at the end of the
+  //optimization we will still need to discard the subset of Poly that
+  //was not selected by trendSelectingPivotedCholesky()
+  return;
+}
+
+
+
+
+
+/** this function calculates the objective function (negative "per equation"
+    log likelihood) and/or the constraint (reciprocal condition number)
+    functions using a precompute and store (store across sequential calls
+    to this function) strategy to reduce the computational cost, make sure
+    only to COPY OUT results from member variables so the state is not
+    changed
+*/
+void KrigingModel::masterObjectiveAndConstraints(const MtxDbl& theta, int obj_der_mode, int con_der_mode)
+{
+  // if(obj_der_mode=1) (1=2^0=> 0th derivative) calculate objective function
+  // if(con_der_mode=1) (1=2^0=> 0th derivative) calculate the constraint
+  //functions
+  // ERROR if(con_der_mode>=2) (2=2^1 = 1st derivative) this function does not
+  //                           support analytical derivatives of the objective
+  //                           function
+  // ERROR if(con_der_mode>=2) (2=2^1 = 1st derivative) this function does not
+  //                           support analytical derivatives of the constraint
+  //                           function
+
+  //printf("maxConDerMode=%d con_der_mode=%d maxObjDerMode=%d obj_der_mode=%d\n",
+  //maxConDerMode,con_der_mode,maxObjDerMode,obj_der_mode);
+
+  //might want to replace this with a thrown exception
+  assert((maxObjDerMode<=1)&&(maxConDerMode<=1)&&
+	 (0<=obj_der_mode)&&(obj_der_mode<=maxObjDerMode)&&
+	 (0<=con_der_mode)&&(con_der_mode<=maxConDerMode)&&
+	 ((1<=obj_der_mode)||(1<=con_der_mode)));
+
+  //if theta was the same as the last time we called this function then we can reuse some of the things we calculated last time
+
+  if(prevTheta.getNElems()!=numTheta) {
+    //different number of elements means we can't reuse
+    prevTheta.newSize(numTheta,1);
+    prevObjDerMode=prevConDerMode=0;
+  }
+  else
+    for(int k=0; k<numTheta; ++k)
+      if(prevTheta(k,0)!=theta(k,0)) {
+	//some parameter changed so we can't reuse
+	prevObjDerMode=prevConDerMode=0;
+	break;
+      }
+
+  if((obj_der_mode<=prevObjDerMode)&&
+     (con_der_mode<=prevConDerMode)) {
+    //we've already calculated everything you just asked for so reuse it
+    return;
+  }
+
+  //record the current theta as the previous theta so next time we can tell
+  //if we should reuse the stuff we calculate this time
+  if((prevObjDerMode==0)&&(prevConDerMode==0))
+    for(int k=0; k<numTheta; ++k)
+      prevTheta(k,0)=theta(k,0);
+
+  if(prevObjDerMode==0) {
+    //fill R with the build data "correlation matrix" (R is a member variable)
+    //for Kriging R is actually a correlation matrix (it is real, symmetric,
+    //positive definite and has all ones on the diagonal) for GEK it is
+    //real symmetric, and positive definite but does not have all ones on the
+    //diagonal, but the GEK R can be equilibrated/scaled to an honest to
+    //goodness correlation matrix.
+    correlation_matrix(theta);
+
+    //we need to perform a LU decomposition of R and calculate the
+    //determinant of R, I have replaced LU with Cholesky because it's
+    //better/faster, see
+    //http://en.wikipedia.org/wiki/Determinant#Determinant_from_LU_decomposition
+    //for how to efficiently compute the determinant from an LU factorization
+
+    int chol_info=0;
+    if(ifPrescribedNug==true) {
+      //the user prescribed a nugget for us to use, e.g. for when there is
+      //measurement error of known magnitude
+      apply_nugget_build(); //modify R by a nugget in place
+      reorderCopyRtoRChol();
+
+      Chol_fact_workspace(RChol,scaleRChol,rcondDblWork,rcondIntWork,
+			  chol_info,rcondR);
+      //Pivoted Cholesky of G*R^-1*G^T does not require pivoted Cholesky of R
+      //so the size of Gtran could have changed
+      nTrend=numTrend(polyOrderRequested,0);
+      if(Gtran.getNCols() < nTrend) {
+	Gtran.newSize(numEqnAvail,nTrend);
+	for(int itrend=0; itrend<nTrend; ++itrend)
+	  for(int i=0; i<numEqnAvail; ++i)
+	    Gtran(i,itrend)=Gall(itrend,i);
+      }
+
+    } else if(ifChooseNug==true) {
+      //the user wants us to select a small nugget to fix ill-conditioning of R
+      nuggetSelectingCholR();
+      //Pivoted Cholesky of G*R^-1*G^T does not require pivoted Cholesky of R
+      //so the size of Gtran could have changed
+      nTrend=numTrend(polyOrderRequested,0);
+      if(Gtran.getNCols() < nTrend) {
+	Gtran.newSize(numEqnAvail,nTrend);
+	for(int itrend=0; itrend<nTrend; ++itrend)
+	  for(int i=0; i<numEqnAvail; ++i)
+	    Gtran(i,itrend)=Gall(itrend,i);
+      }
+    }else {
+      //the user wants us to fix ill-conditioning of R by using Pivoted Cholesky
+      //to select an optimal subset of points from which to build the Kriging
+      //(or Gradient Enhanced Kriging) model
+      equationSelectingCholR();
+    }
+    double min_allowed_rcond=1.0/maxCondNum;
+    //nTrend=numTrend(polyOrder,0);
+    nTrend=numTrend(polyOrderRequested,0);
+
+    if((rcondR<=min_allowed_rcond)) { //||(numRowsR<=nTrend)) {
+      printf("singular correlation matrix rcondR=%g numRowsR=%d numTrend=%d numEqnAvail=%d\n",
+	     rcondR,numRowsR,nTrend,numEqnAvail);
+      MtxDbl corr_len_temp(numVarsr,1);
+      get_corr_len_from_theta(corr_len_temp, theta);
+      printf("corr_len=[%g",corr_len_temp(0,0));
+      for(int kk=1; kk<numVarsr; ++kk)
+	printf(",%g",corr_len_temp(kk,0));
+      printf("]^T\n");
+
+      obj=HUGE_VAL; //the objective would actually be infinite, but it might
+      //say nan if we let it continue and we don't want to trust the optimizer
+      //to handle nan's correctly
+
+      con.newSize(numConFunc,1);
+      con(0,0)=1.0-rcondR*maxCondNum;
+      //there should only be 1 constraint but just in case we'll fill the rest
+      //as being violated
+      for(int i=1; i<numConFunc; ++i)
+	con(i,0)=1.0; //say the constraints are violated,
+
+      //no point in wasting computation on something useless by continuing so
+      //return early
+      return;
+    }
+
+    double log_determinant_R = 0.0; //need to do this to avoid underflow error for large numbers of points, log(0)=-inf
+    for (int i = 0; i < numRowsR; ++i)
+      log_determinant_R += std::log(RChol(i,i));
+    log_determinant_R *= 2.0; //only multiply by 2 for Cholesky factorization
+    //of R because det(L)=det(U) and det(R)=det(L)*det(U)=det(L)^2
+    //so log(det(R))=2*log(det(L))
+
+    //if a future developer wants to switch back from cholesky to LU (and I
+    //strongly recommend against that) you'll need to do a
+    //determinant_R=std::fabs(determinant_R); //for LU factorization
+    //because "The determinant of a positive definite matrix is always
+    //positive" http://mathworld.wolfram.com/PositiveDefiniteMatrix.html and
+    //det(R)=det(pivot Mtx)*det(L)*det(U); det(L)=1, det(U) is what we'd
+    //calculated above for LU and det(pivot Mtx)= +/- 1, which is why you'd
+    //need to do the fabs(det(U)) if you used LU decomp instead of Cholesky
+
+    //Do the generalized (by R^-1) least squares using min # of ops
+    //printf("numPoints=%d numPointsKeep=%d numRowsR=%d nTrend=%d\n",
+    //   numPoints,numPointsKeep,numRowsR,nTrend);
+
+
+    Rinv_Gtran.newSize(numRowsR,nTrend); //precompute and store
+    solve_after_Chol_fact(Rinv_Gtran,RChol,Gtran);
+
+    G_Rinv_Gtran.newSize(nTrend,nTrend);
+    matrix_mult(G_Rinv_Gtran,Gtran,Rinv_Gtran,0.0,1.0,'T','N');
+
+    trendSelectingPivotedCholesky();
+    //Chol_fact_workspace(G_Rinv_Gtran_Chol,G_Rinv_Gtran_Chol_Scale,G_Rinv_Gtran_Chol_DblWork,G_Rinv_Gtran_Chol_IntWork,chol_info,rcond_G_Rinv_Gtran);
+    if((rcond_G_Rinv_Gtran<min_allowed_rcond)||(numRowsR<=nTrend)) {
+      //we could instead use pivoted cholesky to adaptively selected an optimal
+      //subset of trend basis functions (i.e. it could be lower in some
+      //dimensions than in others or have quadratic but not linear in certain
+      //dimensions etc) then we wouldn't have to worry about this
+
+      std::cerr << "R is not singular but G*R^-1*G^T is numerically "
+		<< "singular.  This is probably\ndue to you not having "
+		<< "enough UNIQUE values in one of your input dimensions\n"
+		<< "to support the utilized trend function even though "
+		<< "the total number of\npoints would normally be "
+		<< "sufficient for the selected trend." << std::endl;
+      obj=HUGE_VAL; //the objective would actually be infinite, but it might
+      //say nan if we let it continue and we don't want to trust the optimizer
+      //to handle nan's correctly
+
+      con.newSize(numConFunc,1);
+
+      //there should only be 1 constraint but just in case we'll fill them all
+      //as being violated
+      for(int i=0; i<numConFunc; ++i)
+	con(i,0)=1.0; //say the constraints are violated,
+
+      //no point in wasting computation on something useless by continuing so
+      //return early
+      return;
+    }
+
+#ifdef __KRIG_ERR_CHECK__
+    assert(chol_info==0);  //for debug, do something else for production
+#endif
+
+    double log_determinant_G_Rinv_Gtran=0.0;
+    for (int itrend = 0; itrend < nTrend; ++itrend)
+      log_determinant_G_Rinv_Gtran +=
+	std::log(G_Rinv_Gtran_Chol(itrend,itrend));
+    log_determinant_G_Rinv_Gtran *= 2.0; //only for Cholesky factorization
+
+    G_Rinv_Y.newSize(nTrend,1);
+    matrix_mult(G_Rinv_Y, Rinv_Gtran, Y, 0.0, 1.0, 'T', 'N');
+    betaHat.newSize(nTrend,1);
+
+    solve_after_Chol_fact(betaHat,G_Rinv_Gtran_Chol,G_Rinv_Y); //O(nTrend^2) ops
+    eps.copy(Y); //this will be eps=epsilon=Y-G(XR)^T*betaHat
+    matrix_mult(eps, Gtran, betaHat, 1.0, -1.0, 'N', 'N'); //eps=Y-G(XR)^T*betaHat
+    rhs.newSize(numRowsR,1);
+    solve_after_Chol_fact(rhs,RChol,eps);
+
+
+    //it's actually the log likelihood, which we want to maximize
+    //likelihood = -0.5*(numPoints*(std::log(4.0*std::acos(0.0))+std::log(estVarianceMLE)+1)
+    //		       +std::log(determinant_R)); //from Koehler and Owen
+
+#ifdef __NKM_UNBIASED_LIKE__
+    //derived following: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press, 2006, ISBN 026218253X. c 2006 Massachusetts Institute of Technology. www.GaussianProcess.org/gpml...  we assume a "vague prior" (i.e. that we don't know anything) for betaHat, then like "Koehler and Owen" we replace the covariance matrix K with (unadjusted variance)*R (where R is the correlation matrix) and find unadjusted variance and betaHat through maximum likelihood.
+
+    //the unbiased estimate of unadjusted variance
+    estVarianceMLE = dot_product(eps,rhs)/(numRowsR-nTrend);
+
+    //the "per equationt" unbiased log(likelihood)
+    likelihood = -0.5*(std::log(estVarianceMLE)+(log_determinant_R+log_determinant_G_Rinv_Gtran)/(numRowsR-nTrend));
+#else
+    //derived the "Koehler and Owen" way (assumes we know the trend function, and is therefore biased
+
+    //the estimate of unadjusted variance
+    estVarianceMLE = dot_product(eps,rhs)/numRowsR; //the "Koehler and Owen" way
+
+    //the "per equation" log(likelihood)
+    likelihood = -0.5*(std::log(estVarianceMLE)+log_determinant_R/numRowsR);
+#endif
+
+    //if(likelihood>=DBL_MAX)
+    //printf("[estVarianceMLE=%g determinant_R=%g]",estVarianceMLE,determinant_R);
+
+    //the objective function being MINIMIZED is the negative of the log
+    //likelihood (on a per equation basis so numbers will be comparable
+    //regardless of how many equations there are)
+    obj=-likelihood;
+    //printf("[obj=%g]",obj);
+
+    prevObjDerMode=1; //increase prevObjDerMode to the current value
+    if((obj_der_mode==1)&&(con_der_mode<=prevConDerMode)) {
+      //we have everything we need so exit early
+      return;
+    }
+  }
+
+  if((prevConDerMode==0)&&(1<=con_der_mode)) {
+    //calculate the constraint on reciprocal condition number that ensures
+    //that the correlation matrix is well conditioned.
+    con.newSize(numConFunc,1);
+
+    if(!(1<=prevObjDerMode))
+      std::cerr << "We need to have already calculated rcondR (during the "
+		<< "calculation of the\nobjective function) in order to "
+		<< "calculate the constraint (on rcondR)\nfunction (where "
+		<< "rcondR is the reciprocal of the condition number of R,\n"
+		<< "and R is the ''correlation matrix'')." << std::endl;
+    else if(!(numConFunc==1))
+      std::cerr << "The calling function is asking us for more than one "
+		<< "constraint function\nbut we only have one constraint "
+		<< "function; only rcondR (the reciprocal of\nthe "
+		<< "condition number of the ''correlation matrix'', R) is "
+		<< "constrained." << std::endl;
+    assert((1<=prevObjDerMode)&&(numConFunc==1));
+
+    //the matrix is considered "ill-conditioned" if the following constraint
+    //equation is greater than zero
+    con(0,0)=1.0-rcondR*maxCondNum;
+
+    prevConDerMode=1; //increase prevConDerMode to current value
+    if((con_der_mode==1)&&(obj_der_mode<=prevObjDerMode)) {
+      //we have everything we need so exit early
+      return;
+    }
+  }
+
+  return;
+}
+
+
+void KrigingModel::getRandGuess(MtxDbl& guess) const
+{
+  int mymod = 1048576; //2^20 instead of 10^6 to be kind to the computer
+  guess.newSize(numVarsr,1);
+  for(int k=0; k<numVarsr; k++) {
+    guess(k,0) = (std::rand() % mymod)*(maxNatLogCorrLen-minNatLogCorrLen)/mymod+
+      minNatLogCorrLen; //this returns a random nat_log_corr_len which is the space we need to search in
+  }
+  return;
+}
+
+// BMA TODO: These need to be moved to optimizer and then any defauls
+// overridden here
+
+void KrigingModel::set_conmin_parameters(OptimizationProblem& opt) const
+{
+  //set conmin specific parameters for this problem
+  if((maxObjDerMode==1)&&(maxConDerMode==1)) {
+    //use numerical  gradients of objective and constraints
+    opt.conminData.nfdg = 0;
+  } else {
+    std::cerr << "This Kriging/Gradient-Enhanced-Kriging model does not "
+	      << "support analytical\nderivatives of the objective "
+	      << "(negative per equation log likelihood) or\nconstraint "
+	      << "(reciprocal condition number) functions." << std::endl;
+    assert(false);
+  }
+
+  opt.conminData.iprint = 0; //ammount of to screen output from Conmin
+  opt.conminData.itmax  = maxTrials; //maximum # of Conmin iterations
+  opt.conminData.fdch   = 1.0e-2; //Relative finite difference step size.
+  opt.conminData.fdchm  = 1.0e-2; //Absolute finite difference step size.
+  opt.conminData.ct     = -0.1; // Constraint thickness parameter, The absolute value of CT decreases in magnitude during optimization.
+  opt.conminData.ctmin  = 0.004; //Minimum absolute value of CT used during optimization.
+  opt.conminData.ctl    = -0.01; //Constraint thickness parameter for linear and side constraints.
+  opt.conminData.ctlmin = 0.001; //Minimum value of CTL used during optimization.
+  opt.conminData.delfun = 0.001; //Relative convergence criterion threshold, Threshold for the minimum relative change in the objective function
+  opt.conminData.dabfun = 0.001; //Absolute convergence criterion threshold. Threshold for the minimum relative change in the objective function
+  opt.conminData.nside  = 1; //side constraints parameter
+  opt.conminData.itrm   = 3; //diminishing return criterion iteration number
+  opt.conminData.icndir = numTheta+1; //conjugate direction restart parameter
+}
+
+void KrigingModel::set_direct_parameters(OptimizationProblem& opt) const
+{
+  opt.directData.minBoxSize = -1.0;
+  opt.directData.volBoxSize = -1.0;
+  //opt.directData.minBoxSize = 1.0e-15;
+  //opt.directData.volBoxSize = 1.0e-15;
+  //opt.directData.minBoxSize = 1.0e-3;
+  //opt.directData.volBoxSize = 1.0e-5;
+  opt.directData.solutionTarget = -DBL_MAX;
+  opt.directData.convergenceTol = 1.0e-4;
+  opt.directData.maxFunctionEvals = maxTrials;
+  opt.directData.maxIterations = 1000;
+  opt.directData.verboseOutput = false;
+  opt.directData.constraintsPresent = true;
+}
+
+} // end namespace nkm
Index: /issm/trunk/externalpackages/dakota/configs/6.2/src/DakotaInterface.cpp
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/src/DakotaInterface.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/src/DakotaInterface.cpp	(revision 24686)
@@ -0,0 +1,1358 @@
+/*  _______________________________________________________________________
+
+    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
+    Copyright 2014 Sandia Corporation.
+    This software is distributed under the GNU Lesser General Public License.
+    For more information, see the README file in the top Dakota directory.
+    _______________________________________________________________________ */
+
+//- Class:        Interface
+//- Description:  Class implementation for abstract interface base class
+//- Owner:        Michael Eldred
+
+#include "DakotaInterface.hpp"
+#include "ProblemDescDB.hpp"
+#include "DakotaVariables.hpp"
+
+#include "SysCallApplicInterface.hpp"
+
+#if defined(HAVE_SYS_WAIT_H) && defined(HAVE_UNISTD_H)
+#include "ForkApplicInterface.hpp"
+#elif defined(_WIN32) // or _MSC_VER (native MSVS compilers)
+#include "SpawnApplicInterface.hpp"
+#endif // HAVE_SYS_WAIT_H, HAVE_UNISTD_H
+
+// Direct interfaces
+#ifdef DAKOTA_GRID
+#include "GridApplicInterface.hpp"
+#endif // DAKOTA_GRID
+#ifdef DAKOTA_MATLAB
+#include "MatlabInterface.hpp"
+#endif // DAKOTA_MATLAB
+#ifdef DAKOTA_PYTHON
+#include "PythonInterface.hpp"
+#endif // DAKOTA_PYTHON
+#ifdef DAKOTA_SCILAB
+#include "ScilabInterface.hpp"
+#endif // DAKOTA_SCILAB
+#include "TestDriverInterface.hpp"
+
+#include "ApproximationInterface.hpp"
+
+#ifdef HAVE_AMPL
+#undef NO // avoid name collision from UTILIB
+#include "ampl/asl.h"
+#endif // HAVE_AMPL
+
+//#define DEBUG
+//#define REFCOUNT_DEBUG
+
+namespace Dakota {
+
+
+/** This constructor is the one which must build the base class data for all
+    inherited interfaces.  get_interface() instantiates a derived class letter
+    and the derived constructor selects this base class constructor in its
+    initialization list (to avoid the recursion of the base class constructor
+    calling get_interface() again).  Since this is the letter and the letter
+    IS the representation, interfaceRep is set to NULL (an uninitialized
+    pointer causes problems in ~Interface). */
+Interface::Interface(BaseConstructor, const ProblemDescDB& problem_db):
+  interfaceType(problem_db.get_ushort("interface.type")),
+  interfaceId(problem_db.get_string("interface.id")), algebraicMappings(false),
+  coreMappings(true), outputLevel(problem_db.get_short("method.output")),
+  currEvalId(0), fineGrainEvalCounters(outputLevel > NORMAL_OUTPUT),
+  evalIdCntr(0), newEvalIdCntr(0), evalIdRefPt(0), newEvalIdRefPt(0),
+  multiProcEvalFlag(false), ieDedMasterFlag(false),
+  // See base constructor in DakotaIterator.cpp for full discussion of output
+  // verbosity.  Interfaces support the full granularity in verbosity.
+  appendIfaceId(true), interfaceRep(NULL), referenceCount(1), asl(NULL)
+{
+#ifdef DEBUG
+  outputLevel = DEBUG_OUTPUT;
+#endif // DEBUG
+
+  // Process the algebraic_mappings file (an AMPL .nl file) to get the number
+  // of variables/responses (currently, the tags are converted to index arrays
+  // at evaluation time, using the passed vars and response).
+  // TO DO: parallel bcast of data or very proc reads file?
+  const String& ampl_file_name
+    = problem_db.get_string("interface.algebraic_mappings");
+  if (!ampl_file_name.empty()) {
+#ifdef HAVE_AMPL
+    algebraicMappings = true;
+    bool hess_flag
+      = (problem_db.get_string("responses.hessian_type") == "analytic");
+    asl = (hess_flag) ? ASL_alloc(ASL_read_pfgh) : ASL_alloc(ASL_read_fg);
+    // allow user input of either stub or stub.nl
+    String stub = (strends(ampl_file_name, ".nl")) ?
+      String(ampl_file_name, 0, ampl_file_name.size() - 3) : ampl_file_name;
+    //std::ifstream ampl_nl(ampl_file_name);
+    fint stub_str_len = stub.size();
+    // BMA NOTE: casting away the constness as done historically in DakotaString
+    char* nonconst_stub = (char*) stub.c_str();
+    FILE* ampl_nl = jac0dim(nonconst_stub, stub_str_len);
+    if (!ampl_nl) {
+      Cerr << "\nError: failure opening " << ampl_file_name << std::endl;
+      abort_handler(IO_ERROR);
+    }
+    int rtn = (hess_flag) ? pfgh_read(ampl_nl, ASL_return_read_err)
+                          :   fg_read(ampl_nl, ASL_return_read_err);
+    if (rtn) {
+      Cerr << "\nError: AMPL processing problem with " << ampl_file_name
+	   << std::endl;
+      abort_handler(IO_ERROR);
+    }
+
+    // extract input/output tag lists
+    String row = stub + ".row", col = stub + ".col", ampl_tag;
+
+    std::ifstream ampl_col(col.c_str());
+    if (!ampl_col) {
+      Cerr << "\nError: failure opening " << col.c_str() << std::endl;
+      abort_handler(IO_ERROR);
+    }
+    algebraicVarTags.resize(n_var);
+    for (size_t i=0; i<n_var; i++) {
+      std::getline(ampl_col, ampl_tag);
+      if (ampl_col.good())
+	algebraicVarTags[i] = ampl_tag;
+      else {
+	Cerr << "\nError: failure reading AMPL col file " << col.c_str()
+	     << std::endl;
+	abort_handler(IO_ERROR);
+      }
+    }
+
+    std::ifstream ampl_row(row.c_str());
+    if (!ampl_row) {
+      Cerr << "\nError: failure opening " << row.c_str() << std::endl;
+      abort_handler(IO_ERROR);
+    }
+    algebraicFnTags.resize(n_obj+n_con);
+    algebraicFnTypes.resize(n_obj+n_con);
+    algebraicConstraintWeights.resize(n_con);
+    for (size_t i=0; i<n_obj+n_con; i++) {
+      getline(ampl_row, ampl_tag);
+      if (ampl_row.good()) {
+	algebraicFnTags[i] = ampl_tag;
+	algebraicFnTypes[i] = algebraic_function_type(ampl_tag);
+      }
+      else {
+	Cerr << "\nError: failure reading AMPL row file " << row.c_str()
+	     << std::endl;
+	abort_handler(IO_ERROR);
+      }
+    }
+
+#ifdef DEBUG
+    Cout << ">>>>> algebraicVarTags =\n" << algebraicVarTags
+	 << "\n>>>>> algebraicFnTags =\n" << algebraicFnTags
+	 << "\n>>>>> algebraicFnTypes =\n" << algebraicFnTypes << std::endl;
+#endif
+
+#else
+    Cerr << "\nError: algebraic_mappings not supported without the AMPL solver "
+	 << "library provided with the Acro package." << std::endl;
+    abort_handler(-1);
+#endif // HAVE_AMPL
+  }
+
+#ifdef REFCOUNT_DEBUG
+  Cout << "Interface::Interface(BaseConstructor, ProblemDescDB&) called to "
+       << "build base class data for letter object." << std::endl;
+#endif
+}
+
+
+Interface::Interface(NoDBBaseConstructor, size_t num_fns, short output_level):
+  interfaceId("NO_SPECIFICATION"), algebraicMappings(false), coreMappings(true),
+  outputLevel(output_level), currEvalId(0),
+  fineGrainEvalCounters(outputLevel > NORMAL_OUTPUT), evalIdCntr(0),
+  newEvalIdCntr(0), evalIdRefPt(0), newEvalIdRefPt(0), multiProcEvalFlag(false),
+  ieDedMasterFlag(false), appendIfaceId(true), interfaceRep(NULL),
+  referenceCount(1)
+{
+#ifdef DEBUG
+  outputLevel = DEBUG_OUTPUT;
+#endif // DEBUG
+
+#ifdef REFCOUNT_DEBUG
+  Cout << "Interface::Interface(NoDBBaseConstructor) called to build base "
+       << "class data for letter object." << std::endl;
+#endif
+}
+
+
+/** used in Model envelope class instantiations */
+Interface::Interface(): interfaceRep(NULL), referenceCount(1)
+{ }
+
+
+/** Used in Model instantiation to build the envelope.  This constructor
+    only needs to extract enough data to properly execute get_interface, since
+    Interface::Interface(BaseConstructor, problem_db) builds the
+    actual base class data inherited by the derived interfaces. */
+Interface::Interface(ProblemDescDB& problem_db): referenceCount(1)
+{
+#ifdef REFCOUNT_DEBUG
+  Cout << "Interface::Interface(ProblemDescDB&) called to instantiate envelope."
+       << std::endl;
+#endif
+
+  // Set the rep pointer to the appropriate interface type
+  interfaceRep = get_interface(problem_db);
+  if (!interfaceRep) // bad type or insufficient memory
+    abort_handler(-1);
+}
+
+
+/** used only by the envelope constructor to initialize interfaceRep
+    to the appropriate derived type. */
+Interface* Interface::get_interface(ProblemDescDB& problem_db)
+{
+  const unsigned short interface_type = problem_db.get_ushort("interface.type");
+#ifdef REFCOUNT_DEBUG
+  Cout << "Envelope instantiating letter: Getting interface "
+       << interface_enum_to_string(interface_type) << std::endl;
+#endif
+
+  // In the case where a derived interface type has been selected for managing
+  // analysis_drivers, then this determines the letter instantiation and any
+  // algebraic mappings are overlayed by ApplicationInterface.
+  const String& algebraic_map_file
+    = problem_db.get_string("interface.algebraic_mappings");
+  if (interface_type == SYSTEM_INTERFACE)
+    return new SysCallApplicInterface(problem_db);
+
+  else if (interface_type == FORK_INTERFACE) {
+#if defined(HAVE_SYS_WAIT_H) && defined(HAVE_UNISTD_H) // includes CYGWIN/MINGW
+    return new ForkApplicInterface(problem_db);
+#elif defined(_WIN32) // or _MSC_VER (native MSVS compilers)
+    return new SpawnApplicInterface(problem_db);
+#else
+    Cerr << "Fork interface requested, but not enabled in this DAKOTA "
+	 << "executable." << std::endl;
+    return NULL;
+#endif
+  }
+
+  else if (interface_type == TEST_INTERFACE)
+    return new TestDriverInterface(problem_db);
+  // Note: in the case of a plug-in direct interface, this object gets replaced
+  // using Interface::assign_rep().  Error checking in DirectApplicInterface::
+  // derived_map_ac() should catch if this replacement fails to occur properly.
+
+#ifdef DAKOTA_GRID
+  else if (interface_type == GRID_INTERFACE)
+    return new GridApplicInterface(problem_db);
+#endif
+
+  else if (interface_type == MATLAB_INTERFACE) {
+#ifdef DAKOTA_MATLAB
+    return new MatlabInterface(problem_db);
+#else
+    Cerr << "Direct Matlab interface requested, but not enabled in this "
+	 << "DAKOTA executable." << std::endl;
+      return NULL;
+#endif
+  }
+
+  else if (interface_type == PYTHON_INTERFACE) {
+#ifdef DAKOTA_PYTHON
+    return new PythonInterface(problem_db);
+#else
+    Cerr << "Direct Python interface requested, but not enabled in this "
+	 << "DAKOTA executable." << std::endl;
+    return NULL;
+#endif
+  }
+
+  else if (interface_type == SCILAB_INTERFACE) {
+#ifdef DAKOTA_SCILAB
+    return new ScilabInterface(problem_db);
+#else
+    Cerr << "Direct Scilab interface requested, but not enabled in this "
+	 << "DAKOTA executable." << std::endl;
+    return NULL;
+#endif
+  }
+
+  // Should not be needed since ApproximationInterface is plugged-in from
+  // DataFitSurrModel using Interface::assign_rep().
+  //else if (interface_type == APPROX_INTERFACE)
+  //  return new ApproximationInterface(problem_db, num_acv, num_fns);
+
+  // In the case where only algebraic mappings are used, then no derived map
+  // functionality is needed and ApplicationInterface is used for the letter.
+  else if (!algebraic_map_file.empty()) {
+#ifdef DEBUG
+    Cout << ">>>>> new ApplicationInterface: " << algebraic_map_file
+	 << std::endl;
+#endif // DEBUG
+    return new ApplicationInterface(problem_db);
+  }
+
+  // If the interface type is empty (e.g., from default DataInterface creation
+  // in ProblemDescDB::check_input()), then ApplicationInterface is the letter.
+  else if (interface_type == DEFAULT_INTERFACE) {
+    Cerr << "Warning: empty interface type in Interface::get_interface()."
+	 << std::endl;
+    return new ApplicationInterface(problem_db);
+  }
+
+  else {
+    Cerr << "Invalid interface: " << interface_enum_to_string(interface_type)
+	 << std::endl;
+    return NULL;
+  }
+}
+
+
+/** Copy constructor manages sharing of interfaceRep and incrementing
+    of referenceCount. */
+Interface::Interface(const Interface& interface_in)
+{
+  // Increment new (no old to decrement)
+  interfaceRep = interface_in.interfaceRep;
+  if (interfaceRep) // Check for an assignment of NULL
+    ++interfaceRep->referenceCount;
+
+#ifdef REFCOUNT_DEBUG
+  Cout << "Interface::Interface(Interface&)" << std::endl;
+  if (interfaceRep)
+    Cout << "interfaceRep referenceCount = " << interfaceRep->referenceCount
+	 << std::endl;
+#endif
+}
+
+
+/** Assignment operator decrements referenceCount for old interfaceRep, assigns
+    new interfaceRep, and increments referenceCount for new interfaceRep. */
+Interface Interface::operator=(const Interface& interface_in)
+{
+  if (interfaceRep != interface_in.interfaceRep) { // normal case: old != new
+    // Decrement old
+    if (interfaceRep) // Check for NULL
+      if ( --interfaceRep->referenceCount == 0 )
+	delete interfaceRep;
+    // Assign and increment new
+    interfaceRep = interface_in.interfaceRep;
+    if (interfaceRep) // Check for NULL
+      ++interfaceRep->referenceCount;
+  }
+  // else if assigning same rep, then do nothing since referenceCount
+  // should already be correct
+
+#ifdef REFCOUNT_DEBUG
+  Cout << "Interface::operator=(Interface&)" << std::endl;
+  if (interfaceRep)
+    Cout << "interfaceRep referenceCount = " << interfaceRep->referenceCount
+	 << std::endl;
+#endif
+
+  return *this; // calls copy constructor since returned by value
+}
+
+
+/** Destructor decrements referenceCount and only deletes interfaceRep
+    if referenceCount is zero. */
+Interface::~Interface()
+{
+  // Check for NULL pointer
+  if (interfaceRep) {
+    --interfaceRep->referenceCount;
+#ifdef REFCOUNT_DEBUG
+    Cout << "interfaceRep referenceCount decremented to "
+         << interfaceRep->referenceCount << std::endl;
+#endif
+    if (interfaceRep->referenceCount == 0) {
+#ifdef REFCOUNT_DEBUG
+      Cout << "deleting interfaceRep" << std::endl;
+#endif
+      delete interfaceRep;
+    }
+  }
+}
+
+
+/** Similar to the assignment operator, the assign_rep() function
+    decrements referenceCount for the old interfaceRep and assigns the
+    new interfaceRep.  It is different in that it is used for
+    publishing derived class letters to existing envelopes, as opposed
+    to sharing representations among multiple envelopes (in particular,
+    assign_rep is passed a letter object and operator= is passed an
+    envelope object).  Letter assignment supports two models as
+    governed by ref_count_incr:
+
+    \li ref_count_incr = true (default): the incoming letter belongs to
+    another envelope.  In this case, increment the reference count in the
+    normal manner so that deallocation of the letter is handled properly.
+
+    \li ref_count_incr = false: the incoming letter is instantiated on the
+    fly and has no envelope.  This case is modeled after get_interface():
+    a letter is dynamically allocated using new and passed into assign_rep,
+    the letter's reference count is not incremented, and the letter is not
+    remotely deleted (its memory management is passed over to the envelope). */
+void Interface::assign_rep(Interface* interface_rep, bool ref_count_incr)
+{
+  if (interfaceRep == interface_rep) {
+    // if ref_count_incr = true (rep from another envelope), do nothing as
+    // referenceCount should already be correct (see also operator= logic).
+    // if ref_count_incr = false (rep from on the fly), then this is an error.
+    if (!ref_count_incr) {
+      Cerr << "Error: duplicated interface_rep pointer assignment without "
+	   << "reference count increment in Interface::assign_rep()."
+	   << std::endl;
+      abort_handler(-1);
+    }
+  }
+  else { // normal case: old != new
+    // Decrement old
+    if (interfaceRep) // Check for NULL
+      if ( --interfaceRep->referenceCount == 0 )
+	delete interfaceRep;
+    // Assign new
+    interfaceRep = interface_rep;
+    // Increment new
+    if (interfaceRep && ref_count_incr) // Check for NULL & honor ref_count_incr
+      interfaceRep->referenceCount++;
+  }
+
+#ifdef REFCOUNT_DEBUG
+  Cout << "Interface::assign_rep(Interface*)" << std::endl;
+  if (interfaceRep)
+    Cout << "interfaceRep referenceCount = " << interfaceRep->referenceCount
+	 << std::endl;
+#endif
+}
+
+
+void Interface::fine_grained_evaluation_counters(size_t num_fns)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->fine_grained_evaluation_counters(num_fns);
+  else if (!fineGrainEvalCounters) { // letter (not virtual)
+    init_evaluation_counters(num_fns);
+    fineGrainEvalCounters = true;
+  }
+}
+
+
+void Interface::init_evaluation_counters(size_t num_fns)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->init_evaluation_counters(num_fns);
+  else { // letter (not virtual)
+    //if (fnLabels.empty()) {
+    //  fnLabels.resize(num_fns);
+    //  build_labels(fnLabels, "response_fn_"); // generic resp fn labels
+    //}
+    if (fnValCounter.size() != num_fns) {
+      fnValCounter.assign(num_fns, 0);     fnGradCounter.assign(num_fns, 0);
+      fnHessCounter.assign(num_fns, 0);    newFnValCounter.assign(num_fns, 0);
+      newFnGradCounter.assign(num_fns, 0); newFnHessCounter.assign(num_fns, 0);
+      fnValRefPt.assign(num_fns, 0);       fnGradRefPt.assign(num_fns, 0);
+      fnHessRefPt.assign(num_fns, 0);      newFnValRefPt.assign(num_fns, 0);
+      newFnGradRefPt.assign(num_fns, 0);   newFnHessRefPt.assign(num_fns, 0);
+    }
+  }
+}
+
+
+void Interface::set_evaluation_reference()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->set_evaluation_reference();
+  else { // letter (not virtual)
+
+    evalIdRefPt    = evalIdCntr;
+    newEvalIdRefPt = newEvalIdCntr;
+
+    if (fineGrainEvalCounters) {
+      size_t i, num_fns = fnValCounter.size();
+      for (i=0; i<num_fns; i++) {
+	fnValRefPt[i]     =     fnValCounter[i];
+	newFnValRefPt[i]  =  newFnValCounter[i];
+	fnGradRefPt[i]    =    fnGradCounter[i];
+	newFnGradRefPt[i] = newFnGradCounter[i];
+	fnHessRefPt[i]    =    fnHessCounter[i];
+	newFnHessRefPt[i] = newFnHessCounter[i];
+      }
+    }
+  }
+}
+
+
+// Print a summary of function evaluation counts for this interface to
+// stream s.  minimal_header selects a terse one-line header; when
+// relative_count is true, counts are reported relative to the stored
+// *RefPt reference values (see set_evaluation_reference) rather than as
+// raw totals.
+void Interface::
+print_evaluation_summary(std::ostream& s, bool minimal_header,
+			 bool relative_count) const
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->print_evaluation_summary(s, minimal_header, relative_count);
+  else { // letter (not virtual)
+
+    // standard evaluation summary: total, new, and duplicate counts
+    if (minimal_header) {
+      if (interfaceId.empty())
+	s << "  Interface evaluations";
+      else
+	s << "  " << interfaceId << " evaluations";
+    }
+    else {
+      s << "<<<<< Function evaluation summary";
+      if (!interfaceId.empty())
+	s << " (" << interfaceId << ')';
+    }
+    int     fn_evals = (relative_count) ? evalIdCntr - evalIdRefPt
+                                        : evalIdCntr;
+    int new_fn_evals = (relative_count) ? newEvalIdCntr - newEvalIdRefPt
+                                        : newEvalIdCntr;
+    s << ": " << fn_evals << " total (" << new_fn_evals << " new, "
+      << fn_evals - new_fn_evals << " duplicate)\n";
+
+    // detailed per-function summary of value/gradient/Hessian counts;
+    // min() guards against a size mismatch between counters and labels
+    if (fineGrainEvalCounters) {
+      size_t i, num_fns = std::min(fnValCounter.size(), fnLabels.size());
+      for (i=0; i<num_fns; i++) {
+	int t_v = (relative_count) ?     fnValCounter[i] -     fnValRefPt[i]
+	                           :     fnValCounter[i];
+	int n_v = (relative_count) ?  newFnValCounter[i] -  newFnValRefPt[i]
+	                           :  newFnValCounter[i];
+	int t_g = (relative_count) ?    fnGradCounter[i] -    fnGradRefPt[i]
+	                           :    fnGradCounter[i];
+	int n_g = (relative_count) ? newFnGradCounter[i] - newFnGradRefPt[i]
+	                           : newFnGradCounter[i];
+	int t_h = (relative_count) ?    fnHessCounter[i] -    fnHessRefPt[i]
+	                           :    fnHessCounter[i];
+	int n_h = (relative_count) ? newFnHessCounter[i] - newFnHessRefPt[i]
+	                           : newFnHessCounter[i];
+	// "n" = new evaluations, "d" = duplicates (total - new)
+	s << std::setw(15) << fnLabels[i] << ": "
+	  << t_v << " val ("  << n_v << " n, " << t_v - n_v << " d), "
+	  << t_g << " grad (" << n_g << " n, " << t_g - n_g << " d), "
+	  << t_h << " Hess (" << n_h << " n, " << t_h - n_h << " d)\n";
+      }
+    }
+  }
+}
+
+
+/// default implementation just sets the list of eval ID tags;
+/// derived classes containing additional models or interfaces should
+/// override (currently no use cases)
+void Interface::
+eval_tag_prefix(const String& eval_id_str, bool append_iface_id)
+{
+  if (interfaceRep)
+    interfaceRep->eval_tag_prefix(eval_id_str, append_iface_id);
+  else {
+    // store prefix and flag for later use by final_eval_id_tag()
+    evalTagPrefix = eval_id_str;
+    appendIfaceId = append_iface_id;
+  }
+}
+
+
+// Map variables through the interface to produce a response; pure-virtual
+// behavior emulated via the envelope/letter idiom: the base letter has no
+// default and aborts.
+void Interface::map(const Variables& vars, const ActiveSet& set,
+		    Response& response, bool asynch_flag)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->map(vars, set, response, asynch_flag);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual map function.\n"
+         << "No default map defined at Interface base class." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Resolve the AMPL algebraic variable/function tags against the DAKOTA
+// variable and response descriptors, caching the resulting indices and ids
+// for use during algebraic_mappings().  Aborts on any unmatched tag.
+void Interface::
+init_algebraic_mappings(const Variables& vars, const Response& response)
+{
+  size_t i, num_alg_vars = algebraicVarTags.size(),
+    num_alg_fns = algebraicFnTags.size();
+
+  // map each AMPL variable tag to a DAKOTA all-continuous-variable index/id
+  algebraicACVIndices.resize(num_alg_vars);
+  algebraicACVIds.resize(num_alg_vars);
+  StringMultiArrayConstView acv_labels = vars.all_continuous_variable_labels();
+  SizetMultiArrayConstView  acv_ids    = vars.all_continuous_variable_ids();
+  for (i=0; i<num_alg_vars; ++i) {
+    // Note: variable mappings only support continuous variables.
+    //       discrete variables are not directly supported by ASL interface.
+    size_t acv_index = find_index(acv_labels, algebraicVarTags[i]);
+    //size_t adv_index = find_index(adv_labels, algebraicVarTags[i]);
+    if (acv_index == _NPOS) { // && adv_index == _NPOS) {
+      Cerr << "\nError: AMPL column label " << algebraicVarTags[i] << " does "
+	   <<"not exist in DAKOTA continuous variable descriptors.\n"
+	   << std::endl;
+      abort_handler(INTERFACE_ERROR);
+    }
+    else {
+      algebraicACVIndices[i] = acv_index;
+      //algebraicADVIndices[i] = adv_index;
+      algebraicACVIds[i] = acv_ids[acv_index];
+    }
+  }
+
+  // map each AMPL function (row) tag to a DAKOTA response function index
+  algebraicFnIndices.resize(num_alg_fns);
+  const StringArray& fn_labels = response.function_labels();
+  for (size_t i=0; i<num_alg_fns; ++i) {
+    size_t fn_index = Pecos::find_index(fn_labels, algebraicFnTags[i]);
+    if (fn_index == _NPOS) {
+      Cerr << "\nError: AMPL row label " << algebraicFnTags[i] << " does not "
+	   <<"exist in DAKOTA response descriptors.\n" << std::endl;
+      abort_handler(INTERFACE_ERROR);
+    }
+    else
+      algebraicFnIndices[i] = fn_index;
+  }
+}
+
+
+// Split a total active set into an algebraic active set (defined over the
+// reduced AMPL function/variable spaces) and a core active set (currently a
+// straight copy of the total set).
+void Interface::
+asv_mapping(const ActiveSet& total_set, ActiveSet& algebraic_set,
+	    ActiveSet& core_set)
+{
+  const ShortArray& total_asv = total_set.request_vector();
+  const SizetArray& total_dvv = total_set.derivative_vector();
+
+  // algebraic_asv/dvv:
+
+  // the algebraic active set is defined over reduced algebraic function
+  // and variable spaces, rather than the original spaces.  This simplifies
+  // algebraic_mappings() and allows direct copies of data from AMPL.
+  size_t i, num_alg_fns = algebraicFnTags.size(),
+    num_alg_vars = algebraicVarTags.size();
+  ShortArray algebraic_asv(num_alg_fns);
+  SizetArray algebraic_dvv(num_alg_vars);
+  for (i=0; i<num_alg_fns; i++) // map total_asv to algebraic_asv
+    algebraic_asv[i] = total_asv[algebraicFnIndices[i]];
+
+  algebraic_set.request_vector(algebraic_asv);
+  algebraic_set.derivative_vector(algebraic_dvv);
+  algebraic_set.derivative_start_value(1);
+
+  // core_asv/dvv:
+
+  // for now, core_asv is the same as total_asv, since there is no mechanism
+  // yet to determine if the algebraic_mapping portion is the complete
+  // definition (for which core_asv requests could be turned off).
+  core_set.request_vector(total_asv);
+  core_set.derivative_vector(total_dvv);
+}
+
+
+// Reverse mapping: scatter the algebraic request values back into the
+// corresponding positions of the total active set.
+void Interface::
+asv_mapping(const ActiveSet& algebraic_set, ActiveSet& total_set)
+{
+  const ShortArray& algebraic_asv = algebraic_set.request_vector();
+  size_t i, num_alg_fns = algebraicFnTags.size();
+  for (i=0; i<num_alg_fns; i++) // map algebraic_asv to total_asv
+    total_set.request_value(algebraic_asv[i], algebraicFnIndices[i]);
+}
+
+
+// Evaluate the AMPL-defined algebraic functions (values, gradients,
+// Hessians as requested by algebraic_set) at the mapped subset of DAKOTA
+// variables, writing results into algebraic_response.  No-op unless built
+// with HAVE_AMPL.
+void Interface::
+algebraic_mappings(const Variables& vars, const ActiveSet& algebraic_set,
+		   Response& algebraic_response)
+{
+#ifdef HAVE_AMPL
+  // make sure cur_ASL is pointing to the ASL of this interface
+  // this is important for problems with multiple interfaces
+  set_cur_ASL(asl);
+  const ShortArray& algebraic_asv = algebraic_set.request_vector();
+  const SizetArray& algebraic_dvv = algebraic_set.derivative_vector();
+  size_t i, num_alg_fns = algebraic_asv.size(),
+    num_alg_vars = algebraic_dvv.size();
+  bool grad_flag = false, hess_flag = false;
+  for (i=0; i<num_alg_fns; ++i) {
+    if (algebraic_asv[i] & 2)
+      grad_flag = true;
+    if (algebraic_asv[i] & 4)
+      hess_flag = true;
+  }
+
+  // dak_a_c_vars (DAKOTA space) -> nl_vars (reduced AMPL space)
+  const RealVector& dak_a_c_vars = vars.all_continuous_variables();
+  //IntVector  dak_a_d_vars = vars.all_discrete_variables();
+  Real* nl_vars = new Real [num_alg_vars];
+  for (i=0; i<num_alg_vars; i++)
+    nl_vars[i] = dak_a_c_vars[algebraicACVIndices[i]];
+
+  // nl_vars -> algebraic_response
+  algebraic_response.reset_inactive(); // zero inactive data
+  Real fn_val; RealVector fn_grad; RealSymMatrix fn_hess;
+  fint err = 0;
+  // algebraicFnTypes[i] > 0 encodes AMPL objective (index-1);
+  // <= 0 encodes AMPL constraint (index -1-type)
+  for (i=0; i<num_alg_fns; i++) {
+    // nl_vars -> response fns via AMPL
+    if (algebraic_asv[i] & 1) {
+      if (algebraicFnTypes[i] > 0)
+	fn_val = objval(algebraicFnTypes[i]-1, nl_vars, &err);
+      else
+	fn_val = conival(-1-algebraicFnTypes[i], nl_vars, &err);
+      if (err) {
+	// NOTE(review): message names objval() even when conival() failed
+	Cerr << "\nError: AMPL processing failure in objval().\n" << std::endl;
+	abort_handler(INTERFACE_ERROR);
+      }
+      algebraic_response.function_value(fn_val, i);
+    }
+    // nl_vars -> response grads via AMPL
+    if (algebraic_asv[i] & 6) { // need grad for Hessian
+      // view writes directly into algebraic_response storage
+      fn_grad = algebraic_response.function_gradient_view(i);
+      if (algebraicFnTypes[i] > 0)
+	objgrd(algebraicFnTypes[i]-1, nl_vars, fn_grad.values(), &err);
+      else
+	congrd(-1-algebraicFnTypes[i], nl_vars, fn_grad.values(), &err);
+      if (err) {
+	// NOTE(review): message names objgrad() even when congrd() failed
+	Cerr << "\nError: AMPL processing failure in objgrad().\n" << std::endl;
+	abort_handler(INTERFACE_ERROR);
+      }
+    }
+    // nl_vars -> response Hessians via AMPL
+    if (algebraic_asv[i] & 4) {
+      fn_hess = algebraic_response.function_hessian_view(i);
+      // the fullhess calls must follow corresp call to objgrad/congrad
+      if (algebraicFnTypes[i] > 0)
+	fullhes(fn_hess.values(), num_alg_vars, algebraicFnTypes[i]-1,
+		NULL, NULL);
+      else {
+	// select a single constraint by a unit weight vector
+	algebraicConstraintWeights.assign(algebraicConstraintWeights.size(), 0);
+	algebraicConstraintWeights[-1-algebraicFnTypes[i]] = 1;
+	fullhes(fn_hess.values(), num_alg_vars, num_alg_vars, NULL,
+		&algebraicConstraintWeights[0]);
+      }
+    }
+  }
+  delete [] nl_vars;
+  algebraic_response.function_labels(algebraicFnTags);
+#ifdef DEBUG
+  Cout << ">>>>> algebraic_response.fn_labels\n"
+       << algebraic_response.function_labels() << std::endl;
+#endif // DEBUG
+
+  if (outputLevel > NORMAL_OUTPUT)
+    Cout << "Algebraic mapping applied.\n";
+#endif // HAVE_AMPL
+}
+
+
+/** This function will get invoked even when only algebraic mappings are
+    active (no core mappings from derived_map), since the AMPL
+    algebraic_response may be ordered differently from the total_response.
+    In this case, the core_response object is unused. */
+void Interface::
+response_mapping(const Response& algebraic_response,
+		 const Response& core_response, Response& total_response)
+{
+  const ShortArray& total_asv = total_response.active_set_request_vector();
+  const SizetArray& total_dvv = total_response.active_set_derivative_vector();
+  size_t i, j, k, num_total_fns = total_asv.size(),
+    num_total_vars = total_dvv.size();
+  bool grad_flag = false, hess_flag = false;
+  for (i=0; i<num_total_fns; ++i) {
+    if (total_asv[i] & 2)
+      grad_flag = true;
+    if (total_asv[i] & 4)
+      hess_flag = true;
+  }
+
+  // core_response contributions to total_response:
+
+  if (coreMappings) {
+    // copy requested core data (values/gradients/Hessians) into total
+    total_response.reset_inactive();
+    const ShortArray& core_asv = core_response.active_set_request_vector();
+    size_t num_core_fns = core_asv.size();
+    for (i=0; i<num_core_fns; ++i) {
+      if (core_asv[i] & 1)
+	total_response.function_value(core_response.function_value(i), i);
+      if (core_asv[i] & 2)
+	total_response.function_gradient(
+	  core_response.function_gradient_view(i), i);
+      if (core_asv[i] & 4)
+	total_response.function_hessian(core_response.function_hessian(i), i);
+    }
+  }
+  else {
+    // zero all response data before adding algebraic data to it
+    total_response.reset();
+  }
+
+  // algebraic_response contributions to total_response:
+
+  const ShortArray& algebraic_asv
+    = algebraic_response.active_set_request_vector();
+  size_t num_alg_fns = algebraic_asv.size(),
+    num_alg_vars = algebraic_response.active_set_derivative_vector().size();
+  if (num_alg_fns > num_total_fns) {
+    Cerr << "Error: response size mismatch in Interface::response_mapping()."
+	 << std::endl;
+    abort_handler(-1);
+  }
+  if ( (grad_flag || hess_flag) && num_alg_vars > num_total_vars) {
+    Cerr << "Error: derivative variables size mismatch in Interface::"
+         << "response_mapping()." << std::endl;
+    abort_handler(-1);
+  }
+  // precompute the position of each algebraic variable in the total DVV
+  SizetArray algebraic_dvv_indices;
+  if (grad_flag || hess_flag) {
+    algebraic_dvv_indices.resize(num_alg_vars);
+    using Pecos::find_index;
+    for (i=0; i<num_alg_vars; ++i)
+      algebraic_dvv_indices[i] = find_index(total_dvv, algebraicACVIds[i]);
+      // Note: _NPOS return is handled below
+  }
+  // augment total_response: algebraic contributions are ADDED to any core
+  // data already present, scattered via algebraicFnIndices and the DVV map
+  const RealVector& algebraic_fn_vals = algebraic_response.function_values();
+  const RealMatrix& algebraic_fn_grads
+    = algebraic_response.function_gradients();
+  const RealSymMatrixArray& algebraic_fn_hessians
+    = algebraic_response.function_hessians();
+  RealVector total_fn_vals = total_response.function_values_view();
+  for (i=0; i<num_alg_fns; ++i) {
+    size_t fn_index = algebraicFnIndices[i];
+    if (algebraic_asv[i] & 1)
+      total_fn_vals[fn_index] += algebraic_fn_vals[i];
+    if (algebraic_asv[i] & 2) {
+      const Real* algebraic_fn_grad = algebraic_fn_grads[i];
+      RealVector total_fn_grad
+	= total_response.function_gradient_view(fn_index);
+      for (j=0; j<num_alg_vars; j++) {
+	size_t dvv_index = algebraic_dvv_indices[j];
+	if (dvv_index != _NPOS)
+	  total_fn_grad[dvv_index] += algebraic_fn_grad[j];
+      }
+    }
+    if (algebraic_asv[i] & 4) {
+      const RealSymMatrix& algebraic_fn_hess = algebraic_fn_hessians[i];
+      RealSymMatrix total_fn_hess
+	= total_response.function_hessian_view(fn_index);
+      // lower triangle only (k<=j); symmetric matrix storage
+      for (j=0; j<num_alg_vars; ++j) {
+	size_t dvv_index_j = algebraic_dvv_indices[j];
+	if (dvv_index_j != _NPOS) {
+	  for (k=0; k<=j; ++k) {
+	    size_t dvv_index_k = algebraic_dvv_indices[k];
+	    if (dvv_index_k != _NPOS)
+	      total_fn_hess(dvv_index_j,dvv_index_k) +=	algebraic_fn_hess(j,k);
+	  }
+	}
+      }
+    }
+  }
+
+  // output response sets:
+
+  if (outputLevel == DEBUG_OUTPUT) {
+    if (coreMappings) Cout << "core_response:\n" << core_response;
+    Cout << "algebraic_response:\n" << algebraic_response
+	 << "total_response:\n"     << total_response << '\n';
+  }
+}
+
+
+// Build the evaluation ID tag for an interface evaluation: the stored
+// prefix (see eval_tag_prefix), optionally suffixed with ".<eval_id>".
+String Interface::final_eval_id_tag(int iface_eval_id)
+{
+  if (interfaceRep)
+    return interfaceRep->final_eval_id_tag(iface_eval_id);
+
+  if (appendIfaceId)
+    return evalTagPrefix + "." + boost::lexical_cast<std::string>(iface_eval_id);
+  return evalTagPrefix;
+}
+
+
+// Classify an AMPL function tag: returns i+1 for the i-th objective,
+// -(i+1) for the i-th constraint, aborting if the tag matches neither.
+// Returns 0 when built without AMPL support.
+int Interface::algebraic_function_type(String functionTag)
+{
+#ifdef HAVE_AMPL
+  int i;
+  for (i=0; i<n_obj; i++)
+    if (strcontains(functionTag, obj_name(i)))
+      return i+1;
+  for (i=0; i<n_con; i++)
+    if (strcontains(functionTag, con_name(i)))
+      return -(i+1);
+
+  Cerr << "Error: No function type available for \'" << functionTag << "\' "
+       << "via algebraic_mappings interface." << std::endl;
+  // NOTE(review): no return after abort_handler here; relies on
+  // abort_handler not returning — compilers may warn. TODO confirm.
+  abort_handler(INTERFACE_ERROR);
+#else
+  return 0;
+#endif // HAVE_AMPL
+}
+
+// --- Envelope/letter forwarders for evaluation scheduling ---------------
+// Each function forwards to the letter when present; base-class letters
+// either abort (operation requires a derived redefinition) or no-op.
+
+// Block until all asynchronous evaluations complete; abort if unredefined.
+const IntResponseMap& Interface::synch()
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual synch function.\n"
+         << "No default synch defined at Interface base class." << std::endl;
+    abort_handler(-1);
+  }
+
+  return interfaceRep->synch();
+}
+
+
+// Non-blocking check for completed asynchronous evaluations.
+const IntResponseMap& Interface::synch_nowait()
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual synch_nowait "
+	 << "function.\nNo default synch_nowait defined at Interface base "
+	 << "class." << std::endl;
+    abort_handler(-1);
+  }
+
+  return interfaceRep->synch_nowait();
+}
+
+
+// Run an evaluation server loop (parallel mode); abort if unredefined.
+void Interface::serve_evaluations()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->serve_evaluations();
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual serve_evaluations "
+	 << "function.\nNo default serve_evaluations defined at Interface"
+	 << " base class." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Terminate evaluation servers; abort if unredefined.
+void Interface::stop_evaluation_servers()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->stop_evaluation_servers();
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual stop_evaluation_"
+	 << "servers fn.\nNo default stop_evaluation_servers defined at "
+	 << "Interface base class." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Initialize parallel communicators; base-class default is a no-op.
+void Interface::init_communicators(const IntArray& message_lengths,
+				   int max_eval_concurrency)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->init_communicators(message_lengths, max_eval_concurrency);
+  else { // letter lacking redefinition of virtual fn.
+    // ApproximationInterfaces: do nothing
+  }
+}
+
+
+// Set parallel communicators; base-class default is a no-op.
+void Interface::set_communicators(const IntArray& message_lengths,
+				  int max_eval_concurrency)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->set_communicators(message_lengths, max_eval_concurrency);
+  else { // letter lacking redefinition of virtual fn.
+    // ApproximationInterfaces: do nothing
+  }
+}
+
+
+// free_communicators retained for reference only (currently disabled):
+/*
+void Interface::free_communicators()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->free_communicators();
+  else { // letter lacking redefinition of virtual fn.
+    // default is no-op
+  }
+}
+*/
+
+
+// Serial (non-parallel) initialization; base-class default is a no-op.
+void Interface::init_serial()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->init_serial();
+  else { // letter lacking redefinition of virtual fn.
+    // ApproximationInterfaces: do nothing
+  }
+}
+
+
+// Number of asynchronous local evaluations allowed; 0 in the base letter.
+int Interface::asynch_local_evaluation_concurrency() const
+{
+  if (interfaceRep) // envelope fwd to letter
+    return interfaceRep->asynch_local_evaluation_concurrency();
+  else // letter lacking redefinition of virtual fn.
+    return 0; // default (redefined only for ApplicationInterfaces)
+}
+
+
+// Synchronization mode of this interface; synchronous in the base letter.
+short Interface::interface_synchronization() const
+{
+  if (interfaceRep) // envelope fwd to letter
+    return interfaceRep->interface_synchronization(); // ApplicationInterfaces
+  else // letter lacking redefinition of virtual fn.
+    return SYNCHRONOUS_INTERFACE; // default (ApproximationInterfaces)
+}
+
+
+// Minimum number of data points required to build an approximation;
+// 0 in the base letter.
+int Interface::minimum_points(bool constraint_flag) const
+{
+  if (interfaceRep) // envelope fwd to letter
+    return interfaceRep->minimum_points(constraint_flag);
+  else // letter lacking redefinition of virtual fn.
+    return 0; // default (currently redefined only for ApproximationInterfaces)
+}
+
+
+// Recommended number of data points for building an approximation;
+// 0 in the base letter.
+int Interface::recommended_points(bool constraint_flag) const
+{
+  if (interfaceRep) // envelope fwd to letter
+    return interfaceRep->recommended_points(constraint_flag);
+  else // letter lacking redefinition of virtual fn.
+    return 0; // default (currently redefined only for ApproximationInterfaces)
+}
+
+
+// Designate which response functions are approximated; base letter no-op.
+void Interface::approximation_function_indices(const IntSet& approx_fn_indices)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->approximation_function_indices(approx_fn_indices);
+  // else: default implementation is no-op
+}
+
+
+// --- Approximation data management forwarders ---------------------------
+// update_* replaces approximation data; append_* adds to it.  All base
+// letters abort: only ApproximationInterface redefines these.
+
+// Update approximation data from a single (variables, response) pair.
+void Interface::
+update_approximation(const Variables& vars, const IntResponsePair& response_pr)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->update_approximation(vars, response_pr);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual update_approximation"
+         << "(Variables, IntResponsePair) function.\n       This interface "
+	 << "does not support approximation updating." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Update approximation data from a sample matrix and response map.
+void Interface::
+update_approximation(const RealMatrix& samples, const IntResponseMap& resp_map)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->update_approximation(samples, resp_map);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual update_approximation"
+         << "(RealMatrix, IntResponseMap) function.\n       This interface "
+	 << "does not support approximation updating." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Update approximation data from a variables array and response map.
+void Interface::
+update_approximation(const VariablesArray& vars_array,
+		     const IntResponseMap& resp_map)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->update_approximation(vars_array, resp_map);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual update_approximation"
+         << "(VariablesArray, IntResponseMap) function.\n       This interface "
+	 << "does not support approximation updating." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Append a single (variables, response) pair to the approximation data.
+void Interface::
+append_approximation(const Variables& vars, const IntResponsePair& response_pr)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->append_approximation(vars, response_pr);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual append_approximation"
+	 << "(Variables, IntResponsePair) function.\n       This interface "
+	 << "does not support approximation appending." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Append a sample matrix and response map to the approximation data.
+void Interface::
+append_approximation(const RealMatrix& samples, const IntResponseMap& resp_map)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->append_approximation(samples, resp_map);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual append_approximation"
+         << "(RealMatrix, IntResponseMap) function.\n       This interface "
+	 << "does not support approximation appending." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Append a variables array and response map to the approximation data.
+void Interface::
+append_approximation(const VariablesArray& vars_array,
+		     const IntResponseMap& resp_map)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->append_approximation(vars_array, resp_map);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual append_approximation"
+         << "(VariablesArray, IntResponseMap) function.\n       This interface "
+	 << "does not support approximation appending." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// --- Approximation lifecycle forwarders ---------------------------------
+// All base letters abort: only approximation-capable interfaces redefine.
+
+// Build the approximation over the given continuous/discrete-int/
+// discrete-real variable bounds.
+void Interface::
+build_approximation(const RealVector&  c_l_bnds, const RealVector&  c_u_bnds,
+		    const IntVector&  di_l_bnds, const IntVector&  di_u_bnds,
+		    const RealVector& dr_l_bnds, const RealVector& dr_u_bnds)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->build_approximation(c_l_bnds, c_u_bnds, di_l_bnds, di_u_bnds,
+				      dr_l_bnds, dr_u_bnds);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual build_approximation"
+         << "() function.\n       This interface does not support "
+	 << "approximations." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Rebuild selected approximations as flagged by rebuild_deque.
+void Interface::
+rebuild_approximation(const BoolDeque& rebuild_deque)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->rebuild_approximation(rebuild_deque);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual rebuild_"
+	 << "approximation() function.\n       This interface does not "
+	 << "support approximations." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Remove the most recent data increment, optionally saving it for restore.
+void Interface::pop_approximation(bool save_surr_data)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->pop_approximation(save_surr_data);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual pop_approximation"
+	 << "(bool)\n       function. This interface does not support "
+	 << "approximation\n       data removal." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Restore a previously popped data increment.
+void Interface::restore_approximation()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->restore_approximation();
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual restore_"
+	 << "approximation() function.\n       This interface does not "
+	 << "support approximation restoration." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Query whether a restoration candidate is available.
+bool Interface::restore_available()
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual restore_"
+	 << "available() function.\n       This interface does not "
+	 << "support approximation restoration queries." << std::endl;
+    abort_handler(-1);
+  }
+  return interfaceRep->restore_available();
+}
+
+
+// Finalize the approximation (promote all increments to permanent).
+void Interface::finalize_approximation()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->finalize_approximation();
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual finalize_"
+	 << "approximation() function.\n       This interface does not "
+	 << "support approximation finalization." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Store the current approximation state; base letter aborts.
+void Interface::store_approximation()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->store_approximation();
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual store_"
+	 << "approximation() function.\n       This interface does not "
+	 << "support approximation storage." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Combine stored and current approximations per corr_type; letter aborts.
+void Interface::combine_approximation(short corr_type)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->combine_approximation(corr_type);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual combine_"
+	 << "approximation() function.\n       This interface does not "
+	 << "support approximation combination." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Clear current approximation data; base letter no-op.
+void Interface::clear_current()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->clear_current();
+  else { // letter lacking redefinition of virtual fn.
+    // ApplicationInterfaces: do nothing
+  }
+}
+
+
+// Clear all approximation data; base letter no-op.
+void Interface::clear_all()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->clear_all();
+  else { // letter lacking redefinition of virtual fn.
+    // ApplicationInterfaces: do nothing
+  }
+}
+
+
+// Clear saved approximation data; base letter no-op.
+void Interface::clear_saved()
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->clear_saved();
+  else { // letter lacking redefinition of virtual fn.
+    // ApplicationInterfaces: do nothing
+  }
+}
+
+
+// --- Approximation accessors and misc queries ---------------------------
+// Accessors abort in the base letter (approximation support required);
+// boolean/no-op queries return conservative defaults.
+
+// Access the shared approximation data object.
+SharedApproxData& Interface::shared_approximation()
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual shared_approximation"
+         << "() function.\nThis interface does not support approximations."
+	 << std::endl;
+    abort_handler(-1);
+  }
+
+  // envelope fwd to letter
+  return interfaceRep->shared_approximation();
+}
+
+
+// Access the set of Approximation objects.
+std::vector<Approximation>& Interface::approximations()
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual approximations() "
+	 << "function.\n       This interface does not support approximations."
+	 << std::endl;
+    abort_handler(-1);
+  }
+
+  // envelope fwd to letter
+  return interfaceRep->approximations();
+}
+
+
+// Access the surrogate data for the approximation at the given index.
+const Pecos::SurrogateData& Interface::approximation_data(size_t index)
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual approximation_data "
+	 << "function.\n       This interface does not support approximations."
+	 << std::endl;
+    abort_handler(-1);
+  }
+
+  // envelope fwd to letter
+  return interfaceRep->approximation_data(index);
+}
+
+
+// Retrieve approximation coefficients (optionally normalized).
+const RealVectorArray& Interface::approximation_coefficients(bool normalized)
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual approximation_"
+	 << "coefficients function.\n       This interface does not support "
+         << "approximations." << std::endl;
+    abort_handler(-1);
+  }
+
+  // envelope fwd to letter
+  return interfaceRep->approximation_coefficients(normalized);
+}
+
+
+// Set approximation coefficients (optionally normalized).
+void Interface::
+approximation_coefficients(const RealVectorArray& approx_coeffs,
+			   bool normalized)
+{
+  if (interfaceRep) // envelope fwd to letter
+    interfaceRep->approximation_coefficients(approx_coeffs, normalized);
+  else { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual approximation_"
+	 << "coefficients function.\n       This interface does not support "
+         << "approximations." << std::endl;
+    abort_handler(-1);
+  }
+}
+
+
+// Retrieve approximation prediction variances at the given variables.
+const RealVector& Interface::approximation_variances(const Variables& vars)
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual approximation_"
+	 << "variances function.\n       This interface does not support "
+         << "approximations." << std::endl;
+    abort_handler(-1);
+  }
+
+  // envelope fwd to letter
+  return interfaceRep->approximation_variances(vars);
+}
+
+
+// Retrieve the list of analysis driver names.
+const StringArray& Interface::analysis_drivers() const
+{
+  if (!interfaceRep) { // letter lacking redefinition of virtual fn.
+    Cerr << "Error: Letter lacking redefinition of virtual analysis_drivers "
+	 << "function." << std::endl;
+    abort_handler(-1);
+  }
+
+  // envelope fwd to letter
+  return interfaceRep->analysis_drivers();
+}
+
+
+// Whether this interface caches evaluations; false in the base letter.
+bool Interface::evaluation_cache() const
+{
+  if (interfaceRep)
+    return interfaceRep->evaluation_cache();
+  else // letter lacking redefinition of virtual fn.
+    return false; // default
+}
+
+
+// Clean up working files after evaluations; base letter no-op.
+void Interface::file_cleanup() const
+{
+  if (interfaceRep)
+    interfaceRep->file_cleanup();
+  // else no-op
+}
+
+} // namespace Dakota
Index: /issm/trunk/externalpackages/dakota/configs/6.2/src/NonDLocalReliability.cpp
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/src/NonDLocalReliability.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/src/NonDLocalReliability.cpp	(revision 24686)
@@ -0,0 +1,2704 @@
+/*  _______________________________________________________________________
+
+    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
+    Copyright 2014 Sandia Corporation.
+    This software is distributed under the GNU Lesser General Public License.
+    For more information, see the README file in the top Dakota directory.
+    _______________________________________________________________________ */
+
+//- Class:	 NonDLocalReliability
+//- Description: Implementation code for NonDLocalReliability class
+//- Owner:       Mike Eldred
+//- Checked by:
+//- Version:
+
+#include "dakota_system_defs.hpp"
+#include "DakotaResponse.hpp"
+#include "ParamResponsePair.hpp"
+#include "PRPMultiIndex.hpp"
+#include "ProblemDescDB.hpp"
+#include "DakotaGraphics.hpp"
+#include "NonDLocalReliability.hpp"
+#include "NonDAdaptImpSampling.hpp"
+#ifdef HAVE_NPSOL
+#include "NPSOLOptimizer.hpp"
+#endif
+#ifdef HAVE_OPTPP
+#include "SNLLOptimizer.hpp"
+using OPTPP::NLPFunction;
+using OPTPP::NLPGradient;
+#endif
+#include "RecastModel.hpp"
+#include "DataFitSurrModel.hpp"
+#include "NestedModel.hpp"
+#include "Teuchos_LAPACK.hpp"
+#include "Teuchos_SerialDenseHelpers.hpp"
+#include <algorithm>
+#include "dakota_data_io.hpp"
+
+//#define MPP_CONVERGE_RATE
+//#define DEBUG
+
+static const char rcsId[] = "@(#) $Id: NonDLocalReliability.cpp 4058 2006-10-25 01:39:40Z mseldre $";
+
+namespace Dakota {
+extern PRPCache data_pairs; // global container
+
+
+// define special values for componentParallelMode
+//#define SURROGATE_MODEL 1
+#define TRUTH_MODEL 2
+
+
+// initialization of statics
+NonDLocalReliability* NonDLocalReliability::nondLocRelInstance(NULL);
+
+
+NonDLocalReliability::
+NonDLocalReliability(ProblemDescDB& problem_db, Model& model):
+  NonDReliability(problem_db, model),
+  initialPtUserSpec(
+    probDescDB.get_bool("variables.uncertain.initial_point_flag")),
+  warmStartFlag(true), nipModeOverrideFlag(true),
+  curvatureDataAvailable(false), kappaUpdated(false),
+  secondOrderIntType(HOHENRACK), curvatureThresh(1.e-10), warningBits(0)
+{
+  // check for suitable gradient and variables specifications
+  if (iteratedModel.gradient_type() == "none") {
+    Cerr << "\nError: local_reliability requires a gradient specification."
+	 << std::endl;
+    abort_handler(-1);
+  }
+  if (numEpistemicUncVars) {
+    Cerr << "Error: epistemic variables are not supported in local "
+	 << "reliability methods." << std::endl;
+    abort_handler(-1);
+  }
+
+  if (mppSearchType) { // default is MV = 0
+
+    // Map MPP search NIP/SQP algorithm specification into an NPSOL/OPT++
+    // selection based on configuration availability.
+#if !defined(HAVE_NPSOL) && !defined(HAVE_OPTPP)
+    Cerr << "Error: this executable not configured with NPSOL or OPT++.\n"
+	 << "       NonDLocalReliability cannot perform MPP search."
+         << std::endl;
+    abort_handler(-1);
+#endif
+    unsigned short mpp_optimizer = probDescDB.get_ushort("method.sub_method");
+    if (mpp_optimizer == SUBMETHOD_SQP) {
+#ifdef HAVE_NPSOL
+      npsolFlag = true;
+#else
+      Cerr << "\nError: this executable not configured with NPSOL SQP.\n"
+	   << "         Please select OPT++ NIP within local_reliability."
+	   << std::endl;
+      abort_handler(-1);
+#endif
+    }
+    else if (mpp_optimizer == SUBMETHOD_NIP) {
+#ifdef HAVE_OPTPP
+      npsolFlag = false;
+#else
+      Cerr << "\nError: this executable not configured with OPT++ NIP.\n"
+	   << "         please select NPSOL SQP within local_reliability."
+	   << std::endl;
+      abort_handler(-1);
+#endif
+    }
+    else if (mpp_optimizer == SUBMETHOD_DEFAULT) {
+#ifdef HAVE_NPSOL
+      npsolFlag = true;
+#elif HAVE_OPTPP
+      npsolFlag = false;
+#endif
+    }
+
+    // Error check for a specification of at least 1 level for MPP methods
+    if (!totalLevelRequests) {
+      Cerr << "\nError: An MPP search method requires the specification of at "
+	   << "least one response, probability, or reliability level."
+	   << std::endl;
+      abort_handler(-1);
+    }
+  }
+
+  // Prevent nesting of an instance of a Fortran iterator within another
+  // instance of the same iterator (which would result in data clashes since
+  // Fortran does not support object independence).  Recurse through all
+  // sub-models and test each sub-iterator for SOL presence.
+  // Note 1: This check is performed for DOT, CONMIN, and SOLBase, but not
+  //         for LHS since it is only active in pre-processing.
+  // Note 2: NPSOL/NLSSOL on the outer loop with NonDLocalReliability on the
+  //         inner loop precludes all NPSOL-based MPP searches;
+  //         NonDLocalReliability on the outer loop with NPSOL/NLSSOL on an
+  //         inner loop is only a problem for the no_approx MPP search (since
+  //         iteratedModel is not invoked w/i an approx-based MPP search).
+  if (mppSearchType == NO_APPROX && npsolFlag) {
+    Iterator sub_iterator = iteratedModel.subordinate_iterator();
+    if (!sub_iterator.is_null() &&
+	( sub_iterator.method_name() ==  NPSOL_SQP ||
+	  sub_iterator.method_name() == NLSSOL_SQP ||
+	  sub_iterator.uses_method() ==  NPSOL_SQP ||
+	  sub_iterator.uses_method() == NLSSOL_SQP ) )
+      sub_iterator.method_recourse();
+    ModelList& sub_models = iteratedModel.subordinate_models();
+    for (ModelLIter ml_iter = sub_models.begin();
+	 ml_iter != sub_models.end(); ml_iter++) {
+      sub_iterator = ml_iter->subordinate_iterator();
+      if (!sub_iterator.is_null() &&
+	  ( sub_iterator.method_name() ==  NPSOL_SQP ||
+	    sub_iterator.method_name() == NLSSOL_SQP ||
+	    sub_iterator.uses_method() ==  NPSOL_SQP ||
+	    sub_iterator.uses_method() == NLSSOL_SQP ) )
+	sub_iterator.method_recourse();
+    }
+  }
+
+  // Map response Hessian specification into taylorOrder for use by MV/AMV/AMV+
+  // variants.  Note that taylorOrder and integrationOrder are independent
+  // (although the Hessian specification required for 2nd-order integration
+  // means that taylorOrder = 2 will be used for MV/AMV/AMV+; taylorOrder = 2
+  // may however be used with 1st-order integration).
+  const String& hess_type = iteratedModel.hessian_type();
+  taylorOrder = (hess_type != "none" && mppSearchType <= AMV_PLUS_U) ? 2 : 1;
+
+  // assign iterator-specific defaults for approximation-based MPP searches
+  if (maxIterations <  0          && // DataMethod default = -1
+      mppSearchType >= AMV_PLUS_X && mppSearchType < NO_APPROX) // approx-based
+    maxIterations = 25;
+
+  // The model of the limit state in u-space (uSpaceModel) is constructed here
+  // one time.  The RecastModel for the RIA/PMA formulations varies with the
+  // level requests and is constructed for each level within mpp_search().
+
+  // Instantiate the Nataf Recast and any DataFit model recursions.  Recast is
+  // bounded to 10 std devs in u space.  This is particularly important for PMA
+  // since an SQP-based optimizer will not enforce the constraint immediately
+  // and min +/-g has been observed to have significant excursions early on
+  // prior to the u'u = beta^2 constraint enforcement bringing it back.  A
+  // large excursion can cause overflow; a medium excursion can cause poor
+  // performance since far-field info is introduced into the BFGS Hessian.
+  if (mppSearchType ==  AMV_X || mppSearchType == AMV_PLUS_X ||
+      mppSearchType == TANA_X) { // Recast( DataFit( iteratedModel ) )
+
+    // Construct g-hat(x) using a local/multipoint approximation over the
+    // uncertain variables (using the same view as iteratedModel).
+    Model g_hat_x_model;
+    String sample_reuse, approx_type = (mppSearchType == TANA_X) ?
+      "multipoint_tana" : "local_taylor";
+    UShortArray approx_order(1, taylorOrder);
+    short corr_order = -1, corr_type = NO_CORRECTION,
+      data_order = (taylorOrder == 2) ? 7 : 3;
+    int samples = 0, seed = 0;
+    Iterator dace_iterator;
+    //const Variables& curr_vars = iteratedModel.current_variables();
+    ActiveSet surr_set = iteratedModel.current_response().active_set(); // copy
+    surr_set.request_values(3); // surrogate gradient evals
+    g_hat_x_model.assign_rep(new DataFitSurrModel(dace_iterator, iteratedModel,
+      surr_set, approx_type, approx_order, corr_type, corr_order, data_order,
+      outputLevel, sample_reuse), false);
+
+    // transform g_hat_x_model from x-space to u-space
+    transform_model(g_hat_x_model, uSpaceModel, true); // globally bounded
+  }
+  else if (mppSearchType ==  AMV_U || mppSearchType == AMV_PLUS_U ||
+	   mppSearchType == TANA_U) { // DataFit( Recast( iteratedModel ) )
+
+    // Recast g(x) to G(u)
+    Model g_u_model;
+    transform_model(iteratedModel, g_u_model, true); // globally bounded
+
+    // Construct G-hat(u) using a local/multipoint approximation over the
+    // uncertain variables (using the same view as iteratedModel/g_u_model).
+    String sample_reuse, approx_type = (mppSearchType == TANA_U) ?
+      "multipoint_tana" : "local_taylor";
+    UShortArray approx_order(1, taylorOrder);
+    short corr_order = -1, corr_type = NO_CORRECTION,
+      data_order = (taylorOrder == 2) ? 7 : 3;
+    int samples = 0, seed = 0;
+    Iterator dace_iterator;
+    //const Variables& g_u_vars = g_u_model.current_variables();
+    ActiveSet surr_set = g_u_model.current_response().active_set(); // copy
+    surr_set.request_values(3); // surrogate gradient evals
+    uSpaceModel.assign_rep(new DataFitSurrModel(dace_iterator, g_u_model,
+      surr_set, approx_type, approx_order, corr_type, corr_order, data_order,
+      outputLevel, sample_reuse), false);
+  }
+  else if (mppSearchType == NO_APPROX) // Recast( iteratedModel )
+    // Recast g(x) to G(u)
+    transform_model(iteratedModel, uSpaceModel, true); // globally bounded
+
+  // configure a RecastModel with one objective and one equality constraint
+  // using the alternate minimalist constructor
+  if (mppSearchType) {
+    SizetArray recast_vars_comps_total;  // default: empty; no change in size
+    BitArray all_relax_di, all_relax_dr; // default: empty; no discrete relax
+    mppModel.assign_rep(
+      new RecastModel(uSpaceModel, recast_vars_comps_total,
+		      all_relax_di, all_relax_dr, 1, 1, 0), false);
+    RealVector nln_eq_targets(1, false); nln_eq_targets = 0.;
+    mppModel.nonlinear_eq_constraint_targets(nln_eq_targets);
+
+    // Use NPSOL/OPT++ in "user_functions" mode to perform the MPP search
+    if (npsolFlag) {
+      // NPSOL deriv level: 1 = supplied grads of objective fn, 2 = supplied
+      // grads of constraints, 3 = supplied grads of both.  Always use the
+      // supplied grads of u'u (deriv level = 1 for RIA, deriv level = 2 for
+      // PMA).  In addition, use supplied gradients of G(u) in most cases.
+      // Exception: deriv level = 3 results in a gradient-based line search,
+      // which could be too expensive for FORM with numerical grads unless
+      // seeking parallel load balance.
+      //int npsol_deriv_level;
+      //if ( mppSearchType == NO_APPROX && !iteratedModel.asynch_flag()
+      //     && iteratedModel.gradient_type() != "analytic" )
+      //  npsol_deriv_level = (ria_flag) ? 1 : 2;
+      //else
+      //  npsol_deriv_level = 3;
+      //Cout << "Derivative level = " << npsol_deriv_level << '\n';
+
+      // The gradient-based line search (deriv. level = 3) appears to be
+      // outperforming the value-based line search in PMA testing.  In
+      // addition, the RIA warm start needs fnGradU so deriv. level = 3 has
+      // superior performance there as well.  Therefore, deriv level = 3 can
+      // be used for all cases.
+      int npsol_deriv_level = 3;
+
+      // run a tighter tolerance on approximation-based MPP searches
+      //Real conv_tol = (mppSearchType == NO_APPROX) ? 1.e-4 : 1.e-6;
+      Real conv_tol = -1.; // use NPSOL default
+
+#ifdef HAVE_NPSOL
+      mppOptimizer.assign_rep(new NPSOLOptimizer(mppModel, npsol_deriv_level,
+	conv_tol), false);
+#endif
+    }
+#ifdef HAVE_OPTPP
+    else
+      mppOptimizer.assign_rep(new SNLLOptimizer("optpp_q_newton", mppModel),
+	false);
+#endif
+  }
+
+  // Map integration specification into integrationOrder.  Second-order
+  // integration requires an MPP search in u-space, and is not warranted for
+  // unconverged MPP's (AMV variants).  In addition, AMV variants only compute
+  // verification function values at u* (no Hessians).  For an AMV-like
+  // approach with 2nd-order integration, use AMV+ with max_iterations = 1.
+  const String& integration_method
+    = probDescDB.get_string("method.nond.reliability_integration");
+  if (integration_method.empty() || integration_method == "first_order")
+    integrationOrder = 1;
+  else if (integration_method == "second_order") {
+    if (hess_type == "none") {
+      Cerr << "\nError: second-order integration requires Hessian "
+	   << "specification." << std::endl;
+      abort_handler(-1);
+    }
+    else if (mppSearchType <= AMV_U) {
+      Cerr << "\nError: second-order integration only supported for fully "
+	   << "converged MPP methods." << std::endl;
+      abort_handler(-1);
+    }
+    else
+      integrationOrder = 2;
+  }
+  else {
+    Cerr << "Error: bad integration selection in NonDLocalReliability."
+	 << std::endl;
+    abort_handler(-1);
+  }
+
+  if (integrationRefinement) {
+    for (size_t i=0; i<numFunctions; i++)
+      if (!requestedProbLevels[i].empty() || !requestedRelLevels[i].empty() ||
+	  !requestedGenRelLevels[i].empty()) {
+	Cerr << "\nError: importance sampling methods only supported for RIA."
+	     << "\n\n";
+	abort_handler(-1);
+      }
+    // integration refinement requires an MPP, but it may be unconverged (AMV)
+    if (!mppSearchType) {
+      Cerr << "\nError: integration refinement only supported for MPP methods."
+	   << std::endl;
+      abort_handler(-1);
+    }
+
+    // For NonDLocal, integration refinement is applied to the original model
+    int refine_samples = probDescDB.get_int("method.nond.refinement_samples"),
+        refine_seed    = probDescDB.get_int("method.random_seed");
+    if (!refine_samples) refine_samples = 1000; // context-specific default
+
+    unsigned short sample_type = SUBMETHOD_DEFAULT;
+    String rng; // empty string: use default
+
+    // Note: global bounds definition in transform_model() can be true
+    // (to bound an optimizer search) with AIS use_model_bounds = false
+    // (AIS will ignore these global bounds).
+    bool x_model_flag = false, use_model_bounds = false, vary_pattern = true;
+
+    // AIS is performed in u-space WITHOUT a surrogate: pass a truth u-space
+    // model when available, construct one when not.
+    switch (mppSearchType) {
+    case AMV_X: case AMV_PLUS_X: case TANA_X: {
+      Model g_u_model;
+      transform_model(iteratedModel, g_u_model); // global bounds not needed
+      importanceSampler.assign_rep(new
+        NonDAdaptImpSampling(g_u_model, sample_type, refine_samples,
+	  refine_seed, rng, vary_pattern, integrationRefinement, cdfFlag,
+	  x_model_flag, use_model_bounds), false);
+      break;
+    }
+    case AMV_U: case AMV_PLUS_U: case TANA_U:
+      importanceSampler.assign_rep(new
+	NonDAdaptImpSampling(uSpaceModel.truth_model(), sample_type,
+	  refine_samples, refine_seed, rng, vary_pattern, integrationRefinement,
+	  cdfFlag, x_model_flag, use_model_bounds), false);
+      break;
+    case NO_APPROX:
+      importanceSampler.assign_rep(new
+        NonDAdaptImpSampling(uSpaceModel, sample_type, refine_samples,
+	  refine_seed, rng, vary_pattern, integrationRefinement, cdfFlag,
+	  x_model_flag, use_model_bounds), false);
+      break;
+    }
+  }
+
+  // Size the output arrays.  Relative to sampling methods, the output storage
+  // for reliability methods is more substantial since there may be differences
+  // between requested and computed levels for the same measure (the request is
+  // not always achieved) and since probability and reliability are carried
+  // along in parallel (due to their direct correspondence).
+  computedRelLevels.resize(numFunctions);
+  for (size_t i=0; i<numFunctions; i++) {
+    size_t num_levels = requestedRespLevels[i].length() +
+      requestedProbLevels[i].length() + requestedRelLevels[i].length() +
+      requestedGenRelLevels[i].length();
+    computedRespLevels[i].resize(num_levels);
+    computedProbLevels[i].resize(num_levels);
+    computedRelLevels[i].resize(num_levels);
+    computedGenRelLevels[i].resize(num_levels);
+  }
+
+  // Size class-scope arrays.
+  mostProbPointX.sizeUninitialized(numUncertainVars);
+  mostProbPointU.sizeUninitialized(numUncertainVars);
+  fnGradX.sizeUninitialized(numUncertainVars);
+  fnGradU.sizeUninitialized(numUncertainVars);
+  if (taylorOrder == 2 || integrationOrder == 2) {
+    fnHessX.shapeUninitialized(numUncertainVars);
+    fnHessU.shapeUninitialized(numUncertainVars);
+    if (hess_type == "quasi") {
+      // Note: fnHess=0 in both spaces is not self-consistent for nonlinear
+      // transformations.  However, the point is to use a first-order
+      // approximation in either space prior to curvature accumulation.
+      fnHessX = 0.;
+      fnHessU = 0.;
+    }
+    kappaU.sizeUninitialized(numUncertainVars-1);
+  }
+}
+
+
+NonDLocalReliability::~NonDLocalReliability()
+{ }
+
+
+void NonDLocalReliability::derived_init_communicators(ParLevLIter pl_iter)
+{
+  iteratedModel.init_communicators(pl_iter, maxEvalConcurrency);
+  if (mppSearchType) {
+    // uSpaceModel, mppOptimizer, and importanceSampler use NoDBBaseConstructor,
+    // so no need to manage DB list nodes at this level
+
+    // maxEvalConcurrency defined from the derivative concurrency in the
+    // responses specification.  For FORM/SORM, the NPSOL/OPT++ concurrency
+    // is the same, but for approximate methods, the concurrency is dictated
+    // by the gradType/hessType logic in the instantiate on-the-fly
+    // DataFitSurrModel constructor.
+    uSpaceModel.init_communicators(pl_iter, maxEvalConcurrency);
+    // TO DO: distinguish gradient concurrency for truth vs. surrogate?
+    //        (probably doesn't matter for surrogate)
+
+    mppOptimizer.init_communicators(pl_iter);
+
+    if (integrationRefinement)
+      importanceSampler.init_communicators(pl_iter);
+  }
+}
+
+
+void NonDLocalReliability::derived_set_communicators(ParLevLIter pl_iter)
+{
+  NonD::derived_set_communicators(pl_iter);
+
+  if (mppSearchType) {
+    uSpaceModel.set_communicators(pl_iter, maxEvalConcurrency);
+    mppOptimizer.set_communicators(pl_iter);
+    if (integrationRefinement)
+      importanceSampler.set_communicators(pl_iter);
+  }
+}
+
+
+void NonDLocalReliability::derived_free_communicators(ParLevLIter pl_iter)
+{
+  if (mppSearchType) {
+    if (integrationRefinement)
+      importanceSampler.free_communicators(pl_iter);
+    mppOptimizer.free_communicators(pl_iter);
+    uSpaceModel.free_communicators(pl_iter, maxEvalConcurrency);
+  }
+  iteratedModel.free_communicators(pl_iter, maxEvalConcurrency);
+}
+
+
+void NonDLocalReliability::quantify_uncertainty()
+{
+  if (mppSearchType) mpp_search();
+  else               mean_value();
+
+  numRelAnalyses++;
+}
+
+
+void NonDLocalReliability::mean_value()
+{
+  // For MV, compute approximate mean, standard deviation, and requested
+  // CDF/CCDF data points for each response function and store in
+  // finalStatistics.  Additionally, if uncorrelated variables, compute
+  // importance factors.
+
+  initialize_random_variable_parameters();
+  initial_taylor_series();
+
+  // initialize arrays
+  impFactor.shapeUninitialized(numUncertainVars, numFunctions);
+  statCount = 0;
+  initialize_final_statistics_gradients();
+
+  // local reliability data aren't output to tabular, so send directly
+  // to graphics window only
+  Graphics& dakota_graphics = parallelLib.output_manager().graphics();
+
+  // loop over response functions
+  size_t i;
+  const ShortArray& final_asv = finalStatistics.active_set_request_vector();
+  for (respFnCount=0; respFnCount<numFunctions; respFnCount++) {
+    Real& mean    = momentStats(0,respFnCount);
+    Real& std_dev = momentStats(1,respFnCount);
+
+    // approximate response means already computed
+    finalStatistics.function_value(mean, statCount);
+    // sensitivity of response mean
+    if (final_asv[statCount] & 2) {
+      RealVector fn_grad_mean_x(numUncertainVars, false);
+      for (i=0; i<numUncertainVars; i++)
+	fn_grad_mean_x[i] = fnGradsMeanX(i,respFnCount);
+      // evaluate dg/ds at the variable means and store in finalStatistics
+      RealVector final_stat_grad;
+      dg_ds_eval(natafTransform.x_means(), fn_grad_mean_x, final_stat_grad);
+      finalStatistics.function_gradient(final_stat_grad, statCount);
+    }
+    statCount++;
+
+    // approximate response std deviations already computed
+    finalStatistics.function_value(std_dev, statCount);
+    // sensitivity of response std deviation
+    if (final_asv[statCount] & 2) {
+      // Differentiating the first-order second-moment expression leads to
+      // 2nd-order d^2g/dxds sensitivities which would be awkward to compute
+      // (nonstandard DVV containing active and inactive vars)
+      Cerr << "Error: response std deviation sensitivity not yet supported."
+           << std::endl;
+      abort_handler(-1);
+    }
+    statCount++;
+
+    // if inputs are uncorrelated, compute importance factors
+    if (!natafTransform.x_correlation() && std_dev > Pecos::SMALL_NUMBER) {
+      const Pecos::RealVector& x_std_devs = natafTransform.x_std_deviations();
+      for (i=0; i<numUncertainVars; i++)
+        impFactor(i,respFnCount) = std::pow(x_std_devs[i] / std_dev *
+					    fnGradsMeanX(i,respFnCount), 2);
+    }
+
+    // compute probability/reliability levels for requested response levels and
+    // compute response levels for requested probability/reliability levels.
+    // For failure defined as g<0, beta is simply mean/sigma.  This is extended
+    // to compute general cumulative probabilities for g<z or general
+    // complementary cumulative probabilities for g>z.
+    size_t rl_len = requestedRespLevels[respFnCount].length(),
+           pl_len = requestedProbLevels[respFnCount].length(),
+           bl_len = requestedRelLevels[respFnCount].length(),
+           gl_len = requestedGenRelLevels[respFnCount].length();
+    for (levelCount=0; levelCount<rl_len; levelCount++) {
+      // computed = requested in MV case since no validation fn evals
+      Real z = computedRespLevels[respFnCount][levelCount]
+	= requestedRespLevels[respFnCount][levelCount];
+      // compute beta and p from z
+      Real beta, p;
+      if (std_dev > Pecos::SMALL_NUMBER) {
+	Real ratio = (mean - z)/std_dev;
+        beta = computedRelLevels[respFnCount][levelCount]
+	  = computedGenRelLevels[respFnCount][levelCount]
+	  = (cdfFlag) ? ratio : -ratio;
+        p = computedProbLevels[respFnCount][levelCount] = probability(beta);
+      }
+      else {
+        if ( ( cdfFlag && mean <= z) ||
+	     (!cdfFlag && mean >  z) ) {
+          beta = computedRelLevels[respFnCount][levelCount]
+	    = computedGenRelLevels[respFnCount][levelCount]
+	    = -Pecos::LARGE_NUMBER;
+          p = computedProbLevels[respFnCount][levelCount] = 1.;
+	}
+	else {
+          beta = computedRelLevels[respFnCount][levelCount]
+	    = computedGenRelLevels[respFnCount][levelCount]
+	    = Pecos::LARGE_NUMBER;
+          p = computedProbLevels[respFnCount][levelCount] = 0.;
+	}
+      }
+      switch (respLevelTarget) {
+      case PROBABILITIES:
+	finalStatistics.function_value(p, statCount);    break;
+      case RELIABILITIES: case GEN_RELIABILITIES:
+	finalStatistics.function_value(beta, statCount); break;
+      }
+      if (final_asv[statCount] & 2) {
+	Cerr << "Error: response probability/reliability/gen_reliability level "
+	     << "sensitivity not supported for Mean Value." << std::endl;
+	abort_handler(-1);
+      }
+      statCount++;
+      // Update specialty graphics
+      if (!subIteratorFlag)
+	dakota_graphics.add_datapoint(respFnCount, z, p);
+    }
+    for (i=0; i<pl_len; i++) {
+      levelCount = i+rl_len;
+      // computed = requested in MV case since no validation fn evals
+      Real p = computedProbLevels[respFnCount][levelCount]
+	= requestedProbLevels[respFnCount][i];
+      // compute beta and z from p
+      Real beta = computedRelLevels[respFnCount][levelCount]
+	= computedGenRelLevels[respFnCount][levelCount]	= reliability(p);
+      Real z = computedRespLevels[respFnCount][levelCount] = (cdfFlag)
+        ? mean - beta * std_dev : mean + beta * std_dev;
+      finalStatistics.function_value(z, statCount);
+      if (final_asv[statCount] & 2) {
+	Cerr << "Error: response level sensitivity not supported for Mean "
+	     << "Value." << std::endl;
+	abort_handler(-1);
+      }
+      statCount++;
+      // Update specialty graphics
+      if (!subIteratorFlag)
+	dakota_graphics.add_datapoint(respFnCount, z, p);
+    }
+    for (i=0; i<bl_len+gl_len; i++) {
+      levelCount = i+rl_len+pl_len;
+      // computed = requested in MV case since no validation fn evals
+      Real beta = (i<bl_len) ? requestedRelLevels[respFnCount][i] :
+	requestedGenRelLevels[respFnCount][i-bl_len];
+      computedRelLevels[respFnCount][levelCount]
+	= computedGenRelLevels[respFnCount][levelCount] = beta;
+      // compute p and z from beta
+      Real p = computedProbLevels[respFnCount][levelCount] = probability(beta);
+      Real z = computedRespLevels[respFnCount][levelCount] = (cdfFlag)
+        ? mean - beta * std_dev	: mean + beta * std_dev;
+      finalStatistics.function_value(z, statCount);
+      if (final_asv[statCount] & 2) {
+	Cerr << "Error: response level sensitivity not supported for Mean "
+	     << "Value." << std::endl;
+	abort_handler(-1);
+      }
+      statCount++;
+      // Update specialty graphics
+      if (!subIteratorFlag)
+	dakota_graphics.add_datapoint(respFnCount, z, p);
+    }
+  }
+}
+
+
+void NonDLocalReliability::mpp_search()
+{
+  // set the object instance pointer for use within the static member fns
+  NonDLocalReliability* prev_instance = nondLocRelInstance;
+  nondLocRelInstance = this;
+
+  // The following 2 calls must precede use of natafTransform.trans_X_to_U()
+  initialize_random_variable_parameters();
+  // Modify the correlation matrix (Nataf) and compute its Cholesky factor.
+  // Since the uncertain variable distributions (means, std devs, correlations)
+  // may change among NonDLocalReliability invocations (e.g., RBDO with design
+  // variable insertion), this code block is performed on every invocation.
+  transform_correlations();
+
+  // initialize initialPtUSpec on first reliability analysis; needs to precede
+  // iteratedModel.continuous_variables() assignment in initial_taylor_series()
+  if (numRelAnalyses == 0) {
+    if (initialPtUserSpec)
+      natafTransform.trans_X_to_U(iteratedModel.continuous_variables(),
+				  initialPtUSpec);
+    else {
+      // don't use the mean uncertain variable defaults from the parser
+      // since u ~= 0 can cause problems for some formulations
+      initialPtUSpec.sizeUninitialized(numUncertainVars);
+      initialPtUSpec = 1.;
+    }
+  }
+
+  // sets iteratedModel.continuous_variables() to mean values
+  initial_taylor_series();
+
+  // Initialize local arrays
+  statCount = 0;
+  initialize_final_statistics_gradients();
+
+  // Initialize class scope arrays, modify the correlation matrix, and
+  // evaluate median responses
+  initialize_class_data();
+
+  // Loop over each response function in the responses specification.  It is
+  // important to note that the MPP iteration is different for each response
+  // function, and it is not possible to combine the model evaluations for
+  // multiple response functions.
+  size_t i;
+  const ShortArray& final_asv = finalStatistics.active_set_request_vector();
+  for (respFnCount=0; respFnCount<numFunctions; respFnCount++) {
+
+    // approximate response means already computed
+    finalStatistics.function_value(momentStats(0,respFnCount), statCount);
+    // sensitivity of response mean
+    if (final_asv[statCount] & 2) {
+      RealVector fn_grad_mean_x(numUncertainVars, false);
+      for (i=0; i<numUncertainVars; i++)
+	fn_grad_mean_x[i] = fnGradsMeanX(i,respFnCount);
+      // evaluate dg/ds at the variable means and store in finalStatistics
+      RealVector final_stat_grad;
+      dg_ds_eval(natafTransform.x_means(), fn_grad_mean_x, final_stat_grad);
+      finalStatistics.function_gradient(final_stat_grad, statCount);
+    }
+    statCount++;
+
+    // approximate response std deviations already computed
+    finalStatistics.function_value(momentStats(1,respFnCount), statCount);
+    // sensitivity of response std deviation
+    if (final_asv[statCount] & 2) {
+      // Differentiating the first-order second-moment expression leads to
+      // 2nd-order d^2g/dxds sensitivities which would be awkward to compute
+      // (nonstandard DVV containing active and inactive vars)
+      Cerr << "Error: response std deviation sensitivity not yet supported."
+           << std::endl;
+      abort_handler(-1);
+    }
+    statCount++;
+
+    // The most general case is to allow a combination of response, probability,
+    // reliability, and generalized reliability level specifications for each
+    // response function.
+    size_t rl_len = requestedRespLevels[respFnCount].length(),
+           pl_len = requestedProbLevels[respFnCount].length(),
+           bl_len = requestedRelLevels[respFnCount].length(),
+           gl_len = requestedGenRelLevels[respFnCount].length(),
+           index, num_levels = rl_len + pl_len + bl_len + gl_len;
+
+    // Initialize (or warm-start for repeated reliability analyses) initialPtU,
+    // mostProbPointX/U, computedRespLevel, fnGradX/U, and fnHessX/U.
+    curvatureDataAvailable = false; // no data (yet) for this response function
+    if (num_levels)
+      initialize_level_data();
+
+    // Loop over response/probability/reliability levels
+    for (levelCount=0; levelCount<num_levels; levelCount++) {
+
+      // The rl_len response levels are performed first using the RIA
+      // formulation, followed by the pl_len probability levels and the
+      // bl_len reliability levels using the PMA formulation.
+      bool ria_flag = (levelCount < rl_len),
+	pma2_flag = ( integrationOrder == 2 && ( levelCount < rl_len + pl_len ||
+		      levelCount >= rl_len + pl_len + bl_len ) );
+      if (ria_flag) {
+        requestedTargetLevel = requestedRespLevels[respFnCount][levelCount];
+	Cout << "\n>>>>> Reliability Index Approach (RIA) for response level "
+	     << levelCount+1 << " = " << requestedTargetLevel << '\n';
+      }
+      else if (levelCount < rl_len + pl_len) {
+	index  = levelCount - rl_len;
+	Real p = requestedProbLevels[respFnCount][index];
+	Cout << "\n>>>>> Performance Measure Approach (PMA) for probability "
+	     << "level " << index + 1 << " = " << p << '\n';
+	// gen beta target for 2nd-order PMA; beta target for 1st-order PMA:
+	requestedTargetLevel = reliability(p);
+
+	// CDF probability < 0.5  -->  CDF beta > 0  -->  minimize g
+	// CDF probability > 0.5  -->  CDF beta < 0  -->  maximize g
+	// CDF probability = 0.5  -->  CDF beta = 0  -->  compute g
+	// Note: "compute g" means that min/max is irrelevant since there is
+	// a single G(u) value when the radius beta collapses to the origin
+	Real p_cdf   = (cdfFlag) ? p : 1. - p;
+	pmaMaximizeG = (p_cdf > 0.5); // updated in update_pma_maximize()
+      }
+      else if (levelCount < rl_len + pl_len + bl_len) {
+	index = levelCount - rl_len - pl_len;
+	requestedTargetLevel = requestedRelLevels[respFnCount][index];
+	Cout << "\n>>>>> Performance Measure Approach (PMA) for reliability "
+	     << "level " << index + 1 << " = " << requestedTargetLevel << '\n';
+	Real beta_cdf = (cdfFlag) ?
+	  requestedTargetLevel : -requestedTargetLevel;
+	pmaMaximizeG = (beta_cdf < 0.);
+      }
+      else {
+	index = levelCount - rl_len - pl_len - bl_len;
+	requestedTargetLevel = requestedGenRelLevels[respFnCount][index];
+	Cout << "\n>>>>> Performance Measure Approach (PMA) for generalized "
+	     << "reliability level " << index + 1 << " = "
+	     << requestedTargetLevel << '\n';
+	Real gen_beta_cdf = (cdfFlag) ?
+	  requestedTargetLevel : -requestedTargetLevel;
+	pmaMaximizeG = (gen_beta_cdf < 0.); // updated in update_pma_maximize()
+      }
+
+      // Assign cold/warm-start values for initialPtU, mostProbPointX/U,
+      // computedRespLevel, fnGradX/U, and fnHessX/U.
+      if (levelCount)
+	initialize_mpp_search_data();
+
+#ifdef DERIV_DEBUG
+      // numerical verification of analytic Jacobian/Hessian routines
+      if (mppSearchType == NO_APPROX && levelCount == 0)
+        mostProbPointU = ranVarMeansU;//mostProbPointX = ranVarMeansX;
+      //natafTransform.verify_trans_jacobian_hessian(mostProbPointU);
+      //natafTransform.verify_trans_jacobian_hessian(mostProbPointX);
+      natafTransform.verify_design_jacobian(mostProbPointU);
+#endif // DERIV_DEBUG
+
+      // For AMV+/TANA approximations, iterate until current expansion point
+      // converges to the MPP.
+      approxIters = 0;
+      approxConverged = false;
+      while (!approxConverged) {
+
+	Sizet2DArray vars_map, primary_resp_map, secondary_resp_map;
+	BoolDequeArray nonlinear_resp_map(2);
+	RecastModel* mpp_model_rep = (RecastModel*)mppModel.model_rep();
+	if (ria_flag) { // RIA: g is in constraint
+	  primary_resp_map.resize(1);   // one objective, no contributors
+	  secondary_resp_map.resize(1); // one constraint, one contributor
+	  secondary_resp_map[0].resize(1);
+	  secondary_resp_map[0][0] = respFnCount;
+	  nonlinear_resp_map[1] = BoolDeque(1, false);
+	  mpp_model_rep->initialize(vars_map, false, NULL, NULL,
+	    primary_resp_map, secondary_resp_map, nonlinear_resp_map,
+	    RIA_objective_eval, RIA_constraint_eval);
+	}
+	else { // PMA: g is in objective
+	  primary_resp_map.resize(1);   // one objective, one contributor
+	  primary_resp_map[0].resize(1);
+	  primary_resp_map[0][0] = respFnCount;
+	  secondary_resp_map.resize(1); // one constraint, no contributors
+	  nonlinear_resp_map[0] = BoolDeque(1, false);
+	  // If 2nd-order PMA with p-level or generalized beta-level, use
+	  // PMA2_set_mapping() & PMA2_constraint_eval().  For approx-based
+	  // 2nd-order PMA, we utilize curvature of the surrogate (if any)
+	  // to update beta*
+	  if (pma2_flag)
+	    mpp_model_rep->initialize(vars_map, false, NULL, PMA2_set_mapping,
+	      primary_resp_map, secondary_resp_map, nonlinear_resp_map,
+	      PMA_objective_eval, PMA2_constraint_eval);
+	  else
+	    mpp_model_rep->initialize(vars_map, false, NULL, NULL,
+	      primary_resp_map, secondary_resp_map, nonlinear_resp_map,
+	      PMA_objective_eval, PMA_constraint_eval);
+	}
+	mppModel.continuous_variables(initialPtU);
+
+        // Execute MPP search and retrieve u-space results
+        Cout << "\n>>>>> Initiating search for most probable point (MPP)\n";
+	ParLevLIter pl_iter
+	  = methodPCIter->mi_parallel_level_iterator(miPLIndex);
+	mppOptimizer.run(pl_iter);
+        const Variables& vars_star = mppOptimizer.variables_results();
+        const Response&  resp_star = mppOptimizer.response_results();
+	const RealVector& fns_star = resp_star.function_values();
+        Cout << "\nResults of MPP optimization:\nInitial point (u-space) =\n"
+             << initialPtU << "Final point (u-space)   =\n";
+	write_data(Cout, vars_star.continuous_variables());
+	if (ria_flag)
+	  Cout << "RIA optimum             =\n                     "
+	       << std::setw(write_precision+7) << fns_star[0] << " [u'u]\n"
+	       << "                     " << std::setw(write_precision+7)
+	       << fns_star[1] << " [G(u) - z]\n";
+	else {
+	  Cout << "PMA optimum             =\n                     "
+	       << std::setw(write_precision+7) << fns_star[0] << " [";
+	  if (pmaMaximizeG) Cout << '-';
+	  Cout << "G(u)]\n                     " << std::setw(write_precision+7)
+	       << fns_star[1];
+	  if (pma2_flag) Cout << " [B* - bar-B*]\n";
+	  else           Cout << " [u'u - B^2]\n";
+	}
+
+	// Update MPP search data
+	update_mpp_search_data(vars_star, resp_star);
+
+      } // end AMV+ while loop
+
+      // Update response/probability/reliability level data
+      update_level_data();
+
+      statCount++;
+    } // end loop over levels
+  } // end loop over response fns
+
+  // Update warm-start data
+  if (warmStartFlag && subIteratorFlag) // view->copy
+    copy_data(iteratedModel.inactive_continuous_variables(), prevICVars);
+
+  // This function manages either component or system reliability metrics
+  // via post-processing of computed{Resp,Prob,Rel,GenRel}Levels
+  update_final_statistics();
+
+  // restore in case of recursion
+  nondLocRelInstance = prev_instance;
+}
+
+
+/** An initial first- or second-order Taylor-series approximation is
+    required for MV/AMV/AMV+/TANA or for the case where momentStats
+    (from MV) are required within finalStatistics for subIterator
+    usage of NonDLocalReliability.
+
+    Builds an active-set request vector (asrv) selecting which response
+    functions need values/gradients(/Hessians) at the uncertain-variable
+    means, evaluates the truth model there, and computes first- (and,
+    when Hessian data is available, second-) order mean/std-deviation
+    moment statistics into momentStats. */
+void NonDLocalReliability::initial_taylor_series()
+{
+  bool init_ts_flag = (mppSearchType < NO_APPROX); // updated below
+  const String& hess_type = iteratedModel.hessian_type();
+  size_t i, j, k;
+  ShortArray asrv(numFunctions, 0);
+  short mode = 3; // ASV bits: 1 = function values, 2 = gradients
+  if (taylorOrder == 2 && hess_type != "quasi") // no data yet in quasiHess
+    mode |= 4; // also request Hessians for the 2nd-order expansion
+
+  const ShortArray& final_asv = finalStatistics.active_set_request_vector();
+  switch (mppSearchType) {
+  case MV:
+    // mean-value method: moments are the product, so request all fns
+    asrv.assign(numFunctions, mode);
+    break;
+  case AMV_X:      case AMV_U:
+  case AMV_PLUS_X: case AMV_PLUS_U:
+  case TANA_X:     case TANA_U:
+    // approximation-based MPP searches: only fns with level requests
+    // need an initial expansion at the means
+    for (i=0; i<numFunctions; ++i)
+      if (!requestedRespLevels[i].empty() || !requestedProbLevels[i].empty() ||
+	  !requestedRelLevels[i].empty()  || !requestedGenRelLevels[i].empty() )
+	asrv[i] = mode;
+    // no break: fall through
+  case NO_APPROX:
+    if (subIteratorFlag) {
+      // check final_asv for active mean and std deviation stats
+      size_t cntr = 0;
+      for (i=0; i<numFunctions; i++) {
+	for (j=0; j<2; j++) {
+	  if (final_asv[cntr++]) { // mean, std deviation
+	    asrv[i] = mode;
+	    init_ts_flag = true;
+	  }
+	}
+	// skip the final_asv entries for this fn's z/p/beta/beta* levels
+	cntr += requestedRespLevels[i].length() +
+	  requestedProbLevels[i].length() + requestedRelLevels[i].length() +
+	  requestedGenRelLevels[i].length();
+      }
+    }
+    break;
+  }
+
+  momentStats.shape(2, numFunctions); // init to 0; row 0 = mean, row 1 = std dev
+  if (init_ts_flag) {
+    bool correlation_flag = natafTransform.x_correlation();
+    // Evaluate response values/gradients at the mean values of the uncertain
+    // vars for the (initial) Taylor series expansion in MV/AMV/AMV+.
+    Cout << "\n>>>>> Evaluating response at mean values\n";
+    if (mppSearchType && mppSearchType < NO_APPROX)
+      uSpaceModel.component_parallel_mode(TRUTH_MODEL);
+    iteratedModel.continuous_variables(natafTransform.x_means());
+    activeSet.request_vector(asrv);
+    iteratedModel.compute_response(activeSet);
+    const Response& curr_resp = iteratedModel.current_response();
+    fnValsMeanX       = curr_resp.function_values();
+    fnGradsMeanX      = curr_resp.function_gradients();
+    if (mode & 4) // Hessians only present when requested above
+      fnHessiansMeanX = curr_resp.function_hessians();
+
+    // compute the covariance matrix from the correlation matrix
+    RealSymMatrix covariance;
+    const Pecos::RealVector& x_std_devs = natafTransform.x_std_deviations();
+    if (correlation_flag) {
+      covariance.shapeUninitialized(numUncertainVars);
+      const Pecos::RealSymMatrix& x_corr_mat
+	= natafTransform.x_correlation_matrix();
+      // cov(i,j) = sigma_i * sigma_j * corr(i,j); only the lower triangle
+      // is assigned -- the symmetric matrix type appears to mirror (i,j)
+      // and (j,i) storage (note the commented-out explicit mirror below)
+      for (i=0; i<numUncertainVars; i++) {
+	for (j=0; j<=i; j++) {
+	  covariance(i,j) = x_std_devs[i]*x_std_devs[j]*x_corr_mat(i,j);
+	  //if (i != j)
+	  //  covariance(j,i) = covariance(i,j);
+	}
+      }
+    }
+    else {
+      covariance.shape(numUncertainVars); // inits to 0
+      // uncorrelated case: diagonal covariance of variances
+      for (i=0; i<numUncertainVars; i++)
+	covariance(i,i) = std::pow(x_std_devs[i], 2);
+    }
+
+    // MVFOSM computes a first-order mean, which is just the response evaluated
+    // at the input variable means.  If Hessian data is available, compute a
+    // second-order mean including the effect of input variable correlations.
+    // MVFOSM computes a first-order variance including the effect of input
+    // variable correlations.  Second-order variance requires skewness/kurtosis
+    // of the inputs and is not practical.  NOTE: if fnGradsMeanX is zero, then
+    // std_dev will be zero --> bad for MV CDF estimates.
+    bool t2nq = (taylorOrder == 2 && hess_type != "quasi"); // 2nd-order mean
+    for (i=0; i<numFunctions; ++i) {
+      if (asrv[i]) {
+	Real& mean = momentStats(0,i); Real& std_dev = momentStats(1,i);
+	mean = fnValsMeanX[i]; // first-order mean
+	// v1 accumulates tr(Cov * Hess_i) (2nd-order mean correction);
+	// v2 accumulates grad_i' Cov grad_i (1st-order variance)
+	Real v1 = 0., v2 = 0.;
+	for (j=0; j<numUncertainVars; ++j) {
+	  Real fn_grad_ji = fnGradsMeanX(j,i);
+	  if (correlation_flag)
+	    for (k=0; k<numUncertainVars; ++k) {
+	      Real cov_jk = covariance(j,k);
+	      if (t2nq) v1 += cov_jk * fnHessiansMeanX[i](j,k);
+	      v2 += cov_jk * fn_grad_ji * fnGradsMeanX(k,i);
+	    }
+	  else {
+	    // diagonal covariance: only the j==k terms survive
+	    Real cov_jj = covariance(j,j);
+	    if (t2nq) v1 += cov_jj * fnHessiansMeanX[i](j,j);
+	    v2 += cov_jj * std::pow(fn_grad_ji, 2);
+	  }
+	}
+	if (t2nq) mean += v1/2.; // 2nd-order mean: mu + tr(Cov*Hess)/2
+	std_dev = std::sqrt(v2);
+      }
+    }
+
+    // Teuchos/BLAS-based approach.  As a matrix triple-product, this has some
+    // unneeded FLOPs.  A vector-matrix triple product would be preferable, but
+    // requires vector extractions from fnGradsMeanX.
+    //RealSymMatrix variance(numFunctions, false);
+    //Teuchos::symMatTripleProduct(Teuchos::NO_TRANS, 1., covariance,
+    //                             fnGradsMeanX, variance);
+    //for (i=0; i<numFunctions; i++)
+    //  momentStats(1,i) = sqrt(variance(i,i));
+    //Cout << "\nvariance = " << variance << "\nmomentStats = " << momentStats;
+  }
+}
+
+
+/** Initialize class-scope arrays and perform other start-up
+    activities, such as evaluating median limit state responses.
+
+    Allocates the prev* warm-start arrays on the first reliability
+    analysis of a sub-iterator (e.g., RBDO) context, computes the
+    u-space means, propagates run-time variable/bound updates up the
+    mppModel recursion, and seeds the importance sampler (if any) with
+    the current Nataf transformation. */
+void NonDLocalReliability::initialize_class_data()
+{
+  // Initialize class-scope arrays used to warm-start multiple reliability
+  // analyses within a strategy such as bi-level/sequential RBDO.  Cannot be
+  // performed in constructor due to late availability of subIteratorFlag.
+  if (warmStartFlag && subIteratorFlag && numRelAnalyses == 0) {
+    size_t num_final_grad_vars
+      = finalStatistics.active_set_derivative_vector().size();
+    // per-response-fn level-0 warm-start data: converged MPP (u-space),
+    // cumulative ASV bits, and design/u-space gradients
+    prevMPPULev0.resize(numFunctions);
+    prevCumASVLev0.assign(numFunctions, 0);
+    prevFnGradDLev0.shape(num_final_grad_vars, numFunctions);
+    prevFnGradULev0.shape(numUncertainVars, numFunctions);
+  }
+
+  // define ranVarMeansU for use in the transformed AMV option
+  //if (mppSearchType == AMV_U)
+  natafTransform.trans_X_to_U(natafTransform.x_means(), ranVarMeansU);
+  // must follow transform_correlations()
+
+  /*
+  // Determine median limit state values for AMV/AMV+/FORM/SORM by evaluating
+  // response fns at u = 0 (used for determining signs of reliability indices).
+  Cout << "\n>>>>> Evaluating response at median values\n";
+  if (mppSearchType && mppSearchType < NO_APPROX)
+    uSpaceModel.component_parallel_mode(TRUTH_MODEL);
+  RealVector ep_median_u(numUncertainVars), // inits vals to 0
+             ep_median_x(numUncertainVars, false);
+  natafTransform.trans_U_to_X(ep_median_u, ep_median_x);
+  iteratedModel.continuous_variables(ep_median_x);
+  activeSet.request_values(0); // initialize
+  for (size_t i=0; i<numFunctions; i++)
+    if (!requestedRespLevels[i].empty() || !requestedProbLevels[i].empty() ||
+	!requestedRelLevels[i].empty()  || !requestedGenRelLevels[i].empty())
+      activeSet.request_value(1, i); // only fn vals needed at median unc vars
+  iteratedModel.compute_response(activeSet);
+  medianFnVals = iteratedModel.current_response().function_values();
+  */
+
+  // now that vars/labels/bounds/targets have flowed down at run-time from any
+  // higher level recursions, propagate them up the instantiate-on-the-fly
+  // Model recursion so that they are correct when they propagate back down.
+  mppModel.update_from_subordinate_model(); // recurse_flag = true
+
+  // set up the x-space data within the importance sampler
+  if (integrationRefinement) { // IS/AIS/MMAIS
+    // rep needed for access to functions not mapped to Iterator level
+    NonDAdaptImpSampling* importance_sampler_rep
+      = (NonDAdaptImpSampling*)importanceSampler.iterator_rep();
+    importance_sampler_rep->initialize_random_variables(natafTransform);
+  }
+}
+
+
+/** For a particular response function prior to the first z/p/beta level,
+    initialize/warm-start optimizer initial guess (initialPtU),
+    expansion point (mostProbPointX/U), and associated response
+    data (computedRespLevel, fnGradX/U, and fnHessX/U).
+
+    Two start-up paths: (1) warm start from the previous reliability
+    analysis's level-0 MPP (optionally corrected by a design-sensitivity
+    projection), with re-evaluation of the response data at the new
+    design point; or (2) cold start from mean data and the initial point
+    specification.  In either case, a limit-state surrogate restricted
+    to respFnCount is (re)built for the approximation-based methods. */
+void NonDLocalReliability::initialize_level_data()
+{
+  // All reliability methods need initialization of initialPtU; AMV/AMV+/TANA
+  // methods additionally need initialization of fnGradX/U; and AMV+/TANA
+  // methods additionally need initialization of mostProbPointX/U and
+  // computedRespLevel.
+
+  // If warm-starting across multiple NonDLocalReliability invocations (e.g.,
+  // for modified design parameters in RBDO), warm-start using the level 0 final
+  // results for the corresponding response fn.  For all subsequent levels,
+  // the warm-start procedure is self-contained (i.e., no data from the
+  // previous NonDLocalReliability invocation is used).
+
+  // For AMV+ across multiple NonDLocalReliability invocations, the previous
+  // level 0 converged MPP provides the basis for the initial expansion point.
+  // If inactive variable design sensitivities are available, a projection
+  // from the previous MPP is used.  In either case, re-evaluation of response
+  // data is required to capture the effect of inactive variable changes
+  // (design variables in RBDO).  Since the mean expansion at the new d
+  // has already been computed, one could also use this since it may predict
+  // an MPP estimate (after one opt cycle, prior to the expense of the second
+  // expansion evaluation) as good as the converged/projected MPP at the
+  // old d.  However, the former approach has been observed to be preferable
+  // in practice (even without projection).
+
+  if (warmStartFlag && subIteratorFlag && numRelAnalyses) {
+    // level 0 of each response fn in subsequent UQ analysis: initial
+    // optimizer guess and initial expansion point are the converged
+    // MPP from the previous UQ analysis, for which the computedRespLevel
+    // and fnGradX/U must be re-evaluated due to design variable changes.
+
+    // simplest approach
+    initialPtU = prevMPPULev0[respFnCount]; // AMV/AMV+/FORM
+
+    // If inactive var sensitivities are available, then apply a correction
+    // to initialPtU using a design sensitivity projection (Burton & Hajela).
+    // Note 1: only valid for RIA.
+    // Note 2: when the top level RBDO optimizer is performing a value-based
+    // line search, it is possible for prevFnGradDLev0 to be older than
+    // prevICVars/prevMPPULev0/prevFnGradULev0.  In testing, this appears to
+    // be OK and preferable to bypassing the projection when prevFnGradDLev0
+    // is out of date (which is why the previous ASV test is cumulative: if
+    // prevFnGradDLev0 has been populated, use it whether or not it was from
+    // the last analysis).
+    bool inactive_grads = (prevCumASVLev0[respFnCount] & 2)    ? true : false;
+    bool lev0_ria = (requestedRespLevels[respFnCount].empty()) ? false : true;
+    if (inactive_grads && lev0_ria) {
+      // views into the level-0 gradient columns for this response fn
+      RealVector fn_grad_d = Teuchos::getCol(Teuchos::View, prevFnGradDLev0,
+                                             respFnCount);
+      RealVector fn_grad_u = Teuchos::getCol(Teuchos::View, prevFnGradULev0,
+                                             respFnCount);
+      const RealVector& d_k_plus_1
+	= iteratedModel.inactive_continuous_variables(); // view
+      Real grad_d_delta_d = 0., norm_grad_u_sq = 0.;
+      size_t i, num_icv = d_k_plus_1.length();
+      // grad_d' * (d_{k+1} - d_k): predicted change in g due to design step
+      for (i=0; i<num_icv; i++)
+	grad_d_delta_d += fn_grad_d[i]*( d_k_plus_1[i] - prevICVars[i] );
+      for (i=0; i<numUncertainVars; i++)
+	norm_grad_u_sq += std::pow(fn_grad_u[i], 2);
+      // shift the previous MPP along -grad_u to compensate for that change
+      Real factor = grad_d_delta_d / norm_grad_u_sq;
+      for (i=0; i<numUncertainVars; i++)
+	initialPtU[i] -= factor * fn_grad_u[i];
+    }
+
+    if (mppSearchType == AMV_X || mppSearchType == AMV_U) {
+      // Reevaluation for new des vars already performed at uncertain var means
+      // in initial_taylor_series()
+      assign_mean_data();
+    }
+    else if (mppSearchType == AMV_PLUS_X || mppSearchType == AMV_PLUS_U ||
+	     mppSearchType == TANA_X     || mppSearchType == TANA_U) {
+      // re-expand about the (projected) previous MPP at the new design point
+      mostProbPointU = initialPtU;
+      natafTransform.trans_U_to_X(mostProbPointU, mostProbPointX);
+      if (inactive_grads)
+	Cout << "\n>>>>> Evaluating new response at projected MPP\n";
+      else
+	Cout << "\n>>>>> Evaluating new response at previous MPP\n";
+      uSpaceModel.component_parallel_mode(TRUTH_MODEL);
+      // set active/uncertain vars augmenting inactive design vars
+      iteratedModel.continuous_variables(mostProbPointX);
+      // ASV bits: 7 = values+gradients+Hessians, 3 = values+gradients
+      short mode = (taylorOrder == 2) ? 7 : 3;
+      activeSet.request_values(0); activeSet.request_value(mode, respFnCount);
+
+      iteratedModel.compute_response(activeSet);
+      const Response& curr_resp = iteratedModel.current_response();
+      computedRespLevel = curr_resp.function_value(respFnCount);
+      fnGradX = curr_resp.function_gradient_copy(respFnCount);
+
+      // transform x-space gradient (and Hessian) data to u-space
+      SizetMultiArrayConstView cv_ids = iteratedModel.continuous_variable_ids();
+      SizetArray x_dvv; copy_data(cv_ids, x_dvv);
+      natafTransform.trans_grad_X_to_U(fnGradX, fnGradU, mostProbPointX,
+				       x_dvv, cv_ids);
+      if (mode & 4) {
+	fnHessX = curr_resp.function_hessian(respFnCount);
+	natafTransform.trans_hess_X_to_U(fnHessX, fnHessU, mostProbPointX,
+					 fnGradX, x_dvv, cv_ids);
+	curvatureDataAvailable = true; kappaUpdated = false;
+      }
+    }
+  }
+  else { // level 0 of each response fn in first or only UQ analysis
+
+    // initial fnGradX/U for AMV/AMV+ = grads at mean x values, initial
+    // expansion point for AMV+ = mean x values.
+    if (mppSearchType < NO_APPROX) { // AMV/AMV+/TANA
+      // update mostProbPointX/U, computedRespLevel, fnGradX/U, fnHessX/U
+      assign_mean_data();
+#ifdef MPP_CONVERGE_RATE
+      if (mppSearchType >= AMV_PLUS_X)
+	Cout << "u'u = "  << mostProbPointU.dot(mostProbPointU)
+	     << " G(u) = " << computedRespLevel << '\n';
+#endif // MPP_CONVERGE_RATE
+    }
+
+    // initial optimizer guess in u-space (initialPtU)
+    initialPtU = initialPtUSpec; // initialPtUSpec set in ctor
+
+    /*
+    // fall back if projection is unavailable (or numerics don't work out).
+    initialPtU = (ria_flag) ? initialPtUSpec :
+      std::fabs(requestedCDFRelLevel)/std::sqrt((Real)numUncertainVars);
+
+    // if fnValsMeanX/fnGradU are available, then warm start initialPtU using
+    // a projection from the means.
+    if (warmStartFlag && mv_flag) {
+      Real alpha, norm_grad_u_sq = fnGradU.dot(fnGradU);
+      if (ria_flag) {
+	// use same projection idea as for a z level change,
+	// but project from means
+	if ( norm_grad_u_sq > 1.e-10 ) {
+	  alpha = (requestedRespLevel - fnValsMeanX[respFnCount])
+	        / norm_grad_u_sq;
+	  for (i=0; i<numUncertainVars; i++)
+	    initialPtU[i] = ranVarMeansU[i] + alpha*fnGradU[i];
+	}
+      }
+      else { // pma
+	// the simple projection for a beta level change does not work here
+	// since beta at means will be near-zero (zero if x-space is normally
+	// distributed).  Therefore, solve for the alpha value in
+	// u = u_mean + alpha*dg/du which yields ||u|| = beta.  This requires
+	// solving the quadratic expression a alpha^2 + b alpha + c = 0 with
+	// a = dg/du^T dg/du, b = 2 dg/du^T u_mean, and
+	// c = u_mean^T u_mean - beta^2
+	Real b = 2. * ranVarMeansU.dot(fnGradU),
+	     c = ranVarMeansU.dot(ranVarMeansU) - pow(requestedCDFRelLevel, 2);
+	Real b2m4ac = b*b - 4.*norm_grad_u_sq*c;
+	if (b2m4ac >= 0. && norm_grad_u_sq > 1.e-10) {
+	  Real alpha1 = (-b + std::sqrt(b2m4ac))/2./norm_grad_u_sq,
+	       alpha2 = (-b - std::sqrt(b2m4ac))/2./norm_grad_u_sq,
+	       g_est1 = fnValsMeanX[respFnCount] + alpha1*norm_grad_u_sq;
+	  // Select the correct root based on sign convention involving beta
+	  // and relationship of projected G to G(0):
+	  if (requestedCDFRelLevel >= 0.)
+	    // if beta_cdf >= 0, then projected G should be <= G(0)
+	    alpha = (g_est1 <= medianFnVals[respFnCount]) ? alpha1 : alpha2;
+	  else
+	    // if beta_cdf <  0, then projected G should be >  G(0)
+	    alpha = (g_est1 >  medianFnVals[respFnCount]) ? alpha1 : alpha2;
+	  for (i=0; i<numUncertainVars; i++)
+	    initialPtU[i] = ranVarMeansU[i] + alpha*fnGradU[i];
+	}
+      }
+    }
+    */
+  }
+
+  // Create the initial Taylor series approximation used by AMV/AMV+/TANA
+  if (mppSearchType < NO_APPROX) {
+    // restrict the approximation index set
+    IntSet surr_fn_indices;
+    surr_fn_indices.insert(respFnCount);
+    uSpaceModel.surrogate_function_indices(surr_fn_indices);
+    // construct the approximation
+    update_limit_state_surrogate();
+  }
+}
+
+
+/** For a particular response function at a particular z/p/beta level,
+    warm-start or reset the optimizer initial guess (initialPtU),
+    expansion point (mostProbPointX/U), and associated response
+    data (computedRespLevel, fnGradX/U, and fnHessX/U).
+
+    Warm start: project the previous level's MPP out to the new target
+    (linear Taylor projection along fnGradU for RIA; radial scaling of
+    the MPP to the new beta magnitude for PMA).  Cold start: reset to
+    mean data and the user-specified initial point. */
+void NonDLocalReliability::initialize_mpp_search_data()
+{
+  if (warmStartFlag) {
+    // For subsequent levels (including an RIA to PMA switch), warm start by
+    // using the MPP from the previous level as the initial expansion
+    // point.  The initial guess for the next MPP optimization is warm
+    // started either by a simple copy of the MPP in the case of unconverged
+    // AMV+ iterations (see AMV+ convergence assessment below) or, in the
+    // case of an advance to the next level, by projecting from the current
+    // MPP out to the new beta radius or response level.
+    // NOTE: premature opt. termination can occur if the RIA/PMA 1st-order
+    // optimality conditions (u + lamba*grad_g = 0 or grad_g + lambda*u = 0)
+    // remain satisfied for the new level, even though the new equality
+    // constraint will be violated.  The projection addresses this concern.
+
+    // No action is required for warm start of mostProbPointX/U, fnGradX/U,
+    // and computedRespLevel (not indexed by level)
+
+    // Warm start initialPtU for the next level using a projection.
+    size_t rl_len = requestedRespLevels[respFnCount].length();
+    if (levelCount < rl_len) {
+      // For RIA case, project along fnGradU to next g level using
+      // linear Taylor series:  g2 = g1 + dg/du^T (u2 - u1) where
+      // u2 - u1 = alpha*dg/du gives alpha = (g2 - g1)/(dg/du^T dg/du).
+      // NOTE 1: the requested and computed response levels will differ in
+      // the AMV case.  While the previous computed response level could be
+      // used in the alpha calculation, the ratio of requested levels should
+      // be a more accurate predictor of the next linearized AMV MPP.
+      // NOTE 2: this projection could bypass the need for fnGradU with
+      // knowledge of the Lagrange multipliers at the previous MPP
+      // (u + lamba*grad_g = 0 or grad_g + lambda*u = 0).
+      Real norm_grad_u_sq = fnGradU.dot(fnGradU);
+      if ( norm_grad_u_sq > 1.e-10 ) { // also handles NPSOL numerical case
+	// alpha = (z_next - z_prev) / ||dg/du||^2
+	Real alpha = (requestedTargetLevel -
+          requestedRespLevels[respFnCount][levelCount-1])/norm_grad_u_sq;
+	for (size_t i=0; i<numUncertainVars; i++)
+	  initialPtU[i] = mostProbPointU[i] + alpha * fnGradU[i];
+      }
+      else
+	initialPtU = initialPtUSpec;//mostProbPointU: premature conv w/ some opt
+    }
+    else {
+      // For PMA case, scale mostProbPointU so that its magnitude equals
+      // the next beta_target.
+      // NOTE 1: use of computed levels instead of requested levels handles
+      // an RIA/PMA switch (the observed reliability from the RIA soln is
+      // scaled to the requested reliability of the next PMA level).
+      // NOTE 2: requested and computed reliability levels should agree very
+      // closely in all cases since it is the g term that is linearized, not the
+      // u'u term (which defines beta).  However, if the optimizer fails to
+      // satisfy the PMA constraint, then using the computed level is preferred.
+      //Real prev_pl = (levelCount == rl_len)
+      //  ? computedProbLevels[respFnCount][levelCount-1]
+      //  : requestedProbLevels[respFnCount][levelCount-rl_len-1];
+      size_t pl_len = requestedProbLevels[respFnCount].length(),
+	     bl_len = requestedRelLevels[respFnCount].length();
+      // 2nd-order PMA for p / beta* levels tracks generalized reliability;
+      // otherwise use the standard computed reliability index
+      Real prev_bl = ( integrationOrder == 2 &&
+		       ( levelCount <  rl_len + pl_len ||
+			 levelCount >= rl_len + pl_len + bl_len ) ) ?
+	computedGenRelLevels[respFnCount][levelCount-1] :
+	computedRelLevels[respFnCount][levelCount-1];
+      // Note: scaling is applied to mppU, so we want best est of new beta.
+      // Don't allow excessive init pt scaling if secant Hessian updating.
+      Real high_tol = 1.e+3,
+	low_tol = ( ( taylorOrder == 2 || integrationOrder == 2 ) &&
+		    iteratedModel.hessian_type() == "quasi" ) ? 1.e-3 : 1.e-10;
+      // guard against degenerate scale factors (near-zero or huge betas)
+      if ( std::abs(prev_bl) > low_tol && std::abs(prev_bl) < high_tol &&
+	   std::abs(requestedTargetLevel) > low_tol &&
+	   std::abs(requestedTargetLevel) < high_tol ) {
+	// CDF or CCDF does not matter for scale_factor so long as it is
+	// consistent (CDF/CDF or CCDF/CCDF).
+	Real scale_factor = requestedTargetLevel / prev_bl;
+#ifdef DEBUG
+	Cout << "PMA warm start: previous = " << prev_bl
+	     << " current = " << requestedTargetLevel
+	     << " scale_factor = " << scale_factor << std::endl;
+#endif // DEBUG
+	for (size_t i=0; i<numUncertainVars; i++)
+	  initialPtU[i] = mostProbPointU[i] * scale_factor;
+      }
+      else
+	initialPtU = initialPtUSpec;//mostProbPointU: premature conv w/ some opt
+    }
+  }
+  else { // cold start: reset to mean inputs/outputs
+    // initial fnGradX/U for AMV/AMV+ = grads at mean x values, initial
+    // expansion point for AMV+ = mean x values.
+    if (mppSearchType < NO_APPROX) // AMV/AMV+/TANA
+      assign_mean_data();
+    // initial optimizer guess in u-space (initialPtU)
+    initialPtU = initialPtUSpec; // initialPtUSpec set in ctor
+  }
+}
+
+
+/** Includes case-specific logic for updating MPP search data for the
+    AMV/AMV+/TANA/NO_APPROX methods. */
+void NonDLocalReliability::
+update_mpp_search_data(const Variables& vars_star, const Response& resp_star)
+{
+  size_t rl_len = requestedRespLevels[respFnCount].length(),
+         pl_len = requestedProbLevels[respFnCount].length(),
+         bl_len = requestedRelLevels[respFnCount].length();
+  bool ria_flag = (levelCount < rl_len);
+  const RealVector&    mpp_u = vars_star.continuous_variables(); // view
+  const RealVector& fns_star = resp_star.function_values();
+
+  // Update MPP arrays from optimization results
+  Real conv_metric;
+  switch (mppSearchType) {
+  case AMV_PLUS_X: case AMV_PLUS_U: case TANA_X: case TANA_U:
+    RealVector del_u(numUncertainVars, false);
+    for (size_t i=0; i<numUncertainVars; i++)
+      del_u[i] = mpp_u[i] - mostProbPointU[i];
+    conv_metric = del_u.normFrobenius();
+    break;
+  }
+  copy_data(mpp_u, mostProbPointU); // view -> copy
+  natafTransform.trans_U_to_X(mostProbPointU, mostProbPointX);
+
+  // Set computedRespLevel to the current g(x) value by either performing
+  // a validation function evaluation (AMV/AMV+) or retrieving data from
+  // resp_star (FORM).  Also update approximations and convergence tols.
+  SizetMultiArrayConstView cv_ids = iteratedModel.continuous_variable_ids();
+  SizetArray x_dvv; copy_data(cv_ids, x_dvv);
+  switch (mppSearchType) {
+  case AMV_X: case AMV_U: {
+    approxConverged = true; // break out of while loop
+    uSpaceModel.component_parallel_mode(TRUTH_MODEL);
+    activeSet.request_values(0); activeSet.request_value(1, respFnCount);
+    iteratedModel.continuous_variables(mostProbPointX);
+    iteratedModel.compute_response(activeSet);
+    computedRespLevel
+      = iteratedModel.current_response().function_value(respFnCount);
+    break;
+  }
+  case AMV_PLUS_X: case AMV_PLUS_U: case TANA_X: case TANA_U: {
+    // Assess AMV+/TANA iteration convergence.  ||del_u|| is not a perfect
+    // metric since cycling between MPP estimates can occur.  Therefore,
+    // a maximum number of iterations is also enforced.
+    //conv_metric = std::fabs(fn_vals[respFnCount] - requestedRespLevel);
+    ++approxIters;
+    if (conv_metric < convergenceTol)
+      approxConverged = true;
+    else if (approxIters >= maxIterations) {
+      Cerr << "\nWarning: maximum number of limit state approximation cycles "
+	   << "exceeded.\n";
+      warningBits |= 1; // first warning in output summary
+      approxConverged = true;
+    }
+    // Update response data for local/multipoint MPP approximation
+    short mode = 1;
+    if (approxConverged) {
+      Cout << "\n>>>>> Approximate MPP iterations converged.  "
+	   << "Evaluating final response.\n";
+      // fnGradX/U needed for warm starting by projection, final_stat_grad,
+      // and/or 2nd-order integration.
+      const ShortArray& final_asv = finalStatistics.active_set_request_vector();
+      if ( warmStartFlag || ( final_asv[statCount] & 2 ) )
+	mode |= 2;
+      if (integrationOrder == 2) {
+	mode |= 4;
+	if (numUncertainVars != numNormalVars)
+	  mode |= 2; // fnGradX needed to transform fnHessX to fnHessU
+      }
+    }
+    else { // not converged
+      Cout << "\n>>>>> Updating approximation for MPP iteration "
+	   << approxIters+1 << "\n";
+      mode |= 2;            // update AMV+/TANA approximation
+      if (taylorOrder == 2) // update AMV^2+ approximation
+	mode |= 4;
+      if (warmStartFlag) // warm start initialPtU for next AMV+ iteration
+	initialPtU = mostProbPointU;
+    }
+    // evaluate new expansion point
+    uSpaceModel.component_parallel_mode(TRUTH_MODEL);
+    activeSet.request_values(0);
+    activeSet.request_value(mode, respFnCount);
+    iteratedModel.continuous_variables(mostProbPointX);
+    iteratedModel.compute_response(activeSet);
+    const Response& curr_resp = iteratedModel.current_response();
+    computedRespLevel = curr_resp.function_value(respFnCount);
+#ifdef MPP_CONVERGE_RATE
+    Cout << "u'u = "  << mostProbPointU.dot(mostProbPointU)
+	 << " G(u) = " << computedRespLevel << '\n';
+#endif // MPP_CONVERGE_RATE
+    if (mode & 2) {
+      fnGradX = curr_resp.function_gradient_copy(respFnCount);
+      natafTransform.trans_grad_X_to_U(fnGradX, fnGradU, mostProbPointX,
+				       x_dvv, cv_ids);
+    }
+    if (mode & 4) {
+      fnHessX = curr_resp.function_hessian(respFnCount);
+      natafTransform.trans_hess_X_to_U(fnHessX, fnHessU, mostProbPointX,
+				       fnGradX, x_dvv, cv_ids);
+      curvatureDataAvailable = true; kappaUpdated = false;
+    }
+
+    // Update the limit state surrogate model
+    update_limit_state_surrogate();
+
+    // Update pmaMaximizeG if 2nd-order PMA for specified p / beta* level
+    if ( !approxConverged && !ria_flag && integrationOrder == 2 )
+      update_pma_maximize(mostProbPointU, fnGradU, fnHessU);
+
+    break;
+  }
+  case NO_APPROX: { // FORM/SORM
+
+    // direct optimization converges to MPP: no new approximation to compute
+    approxConverged = true; // break out of while loop
+    if (ria_flag) // RIA computed response = eq_con_star + response target
+      computedRespLevel = fns_star[1] + requestedTargetLevel;
+    else          // PMA computed response = +/- obj_fn_star
+      computedRespLevel = (pmaMaximizeG) ? -fns_star[0] : fns_star[0];
+
+    // fnGradX/U needed for warm starting by projection, final_stat_grad, and/or
+    // 2nd-order integration (for nonlinear transformations), and should be
+    // retrievable from previous evals.  If second-order integration for RIA,
+    // fnHessX/U also needed, but not retrievable.  If second-order PMA with
+    // specified p-level, Hessian should be retrievable since it was computed
+    // during the update of requestedCDFRelLevel from requestedCDFProbLevel.
+    // When data should be retrievable, we cannot in general assume that the
+    // last grad/Hessian eval corresponds to the converged MPP; therefore, we
+    // use a DB search.  If the DB search fails (e.g., the eval cache is
+    // deactivated), then we resort to reevaluation.
+    short mode = 0, found_mode = 0; // computedRespLevel already retrieved
+    const ShortArray& final_asv = finalStatistics.active_set_request_vector();
+    if ( warmStartFlag || ( final_asv[statCount] & 2 ) )
+      mode |= 2;
+    if ( integrationOrder == 2 ) {// apply 2nd-order integr in all RIA/PMA cases
+      mode |= 4;
+      if (numUncertainVars != numNormalVars)
+	mode |= 2; // fnGradX needed to transform fnHessX to fnHessU
+    }
+
+    // retrieve previously evaluated gradient information, if possible
+    if (mode & 2) { // avail in all RIA/PMA cases (exception: numerical grads)
+      // query data_pairs to retrieve the fn gradient at the MPP
+      Variables search_vars = iteratedModel.current_variables().copy();
+      search_vars.continuous_variables(mostProbPointX);
+      ActiveSet search_set = resp_star.active_set();
+      ShortArray search_asv(numFunctions, 0);
+      search_asv[respFnCount] = 2;
+      search_set.request_vector(search_asv);
+      PRPCacheHIter cache_it = lookup_by_val(data_pairs,
+	iteratedModel.interface_id(), search_vars, search_set);
+      if (cache_it != data_pairs.get<hashed>().end()) {
+	fnGradX = cache_it->response().function_gradient_copy(respFnCount);
+	found_mode |= 2;
+      }
+    }
+    // retrieve previously evaluated Hessian information, if possible
+    // > RIA and std PMA beta-level: Hessian not avail since not yet evaluated
+    // > PMA p-level and generalized beta-level: Hessian should be available
+    if ( ( mode & 4 ) && !ria_flag &&
+	 ( levelCount <  rl_len + pl_len ||
+	   levelCount >= rl_len + pl_len + bl_len ) ) {
+      // query data_pairs to retrieve the fn Hessian at the MPP
+      Variables search_vars = iteratedModel.current_variables().copy();
+      search_vars.continuous_variables(mostProbPointX);
+      ActiveSet search_set = resp_star.active_set();
+      ShortArray search_asv(numFunctions, 0);
+      search_asv[respFnCount] = 4;
+      search_set.request_vector(search_asv);
+      PRPCacheHIter cache_it = lookup_by_val(data_pairs,
+	iteratedModel.interface_id(), search_vars, search_set);
+      if (cache_it != data_pairs.get<hashed>().end()) {
+        fnHessX = cache_it->response().function_hessian(respFnCount);
+	found_mode |= 4;
+      }
+    }
+    // evaluate any remaining required data which could not be retrieved
+    short remaining_mode = mode - found_mode;
+    if (remaining_mode) {
+      Cout << "\n>>>>> Evaluating limit state derivatives at MPP\n";
+      iteratedModel.continuous_variables(mostProbPointX);
+      activeSet.request_values(0);
+      activeSet.request_value(remaining_mode, respFnCount);
+      iteratedModel.compute_response(activeSet);
+      const Response& curr_resp = iteratedModel.current_response();
+      if (remaining_mode & 2)
+	fnGradX = curr_resp.function_gradient_copy(respFnCount);
+      if (remaining_mode & 4)
+        fnHessX = curr_resp.function_hessian(respFnCount);
+    }
+    if (mode & 2)
+      natafTransform.trans_grad_X_to_U(fnGradX, fnGradU, mostProbPointX,
+				       x_dvv, cv_ids);
+    if (mode & 4) {
+      natafTransform.trans_hess_X_to_U(fnHessX, fnHessU, mostProbPointX,
+				       fnGradX, x_dvv, cv_ids);
+      curvatureDataAvailable = true; kappaUpdated = false;
+    }
+    break;
+  }
+  }
+
+  // set computedRelLevel using u'u from fns_star; must follow fnGradU update
+  if (ria_flag)
+    computedRelLevel = signed_norm(std::sqrt(fns_star[0]));
+  else if (integrationOrder == 2) { // second-order PMA
+    // no op: computed{Rel,GenRel}Level updated in PMA2_constraint_eval()
+  }
+  else { // first-order PMA
+    Real norm_u_sq = fns_star[1] + std::pow(requestedTargetLevel, 2);
+    computedRelLevel = signed_norm(std::sqrt(norm_u_sq));
+  }
+}
+
+
+/** Updates computedRespLevels/computedProbLevels/computedRelLevels,
+    finalStatistics, warm start, and graphics data. */
+void NonDLocalReliability::update_level_data()
+{
+  // local reliability data aren't output to tabular, so send directly
+  // to graphics window only
+  Graphics& dakota_graphics = parallelLib.output_manager().graphics();
+
+  // levelCount indexes the concatenated {z, p, beta, beta*} requests for this
+  // response fn; the leading z-level block corresponds to RIA iterations
+  bool ria_flag = (levelCount < requestedRespLevels[respFnCount].length());
+
+  // Update computed Resp/Prob/Rel/GenRel levels arrays.  finalStatistics
+  // is updated within update_final_statistics() for all resp fns & levels.
+  computedRespLevels[respFnCount][levelCount] = computedRespLevel;
+  computedRelLevels[respFnCount][levelCount]  = computedRelLevel;
+  Real computed_prob_level;
+  // Second-order PMA: computed{Rel,GenRel}Level were already updated in
+  // PMA2_constraint_eval(), so derive p directly from the generalized beta*.
+  if (!ria_flag && integrationOrder == 2) {
+    computedGenRelLevels[respFnCount][levelCount] = computedGenRelLevel;
+    computedProbLevels[respFnCount][levelCount] = computed_prob_level =
+      probability(computedGenRelLevel);
+  }
+  // RIA or first-order PMA: p from beta (with curvature correction when
+  // available), then beta* from p.
+  else {
+    computedProbLevels[respFnCount][levelCount] = computed_prob_level =
+      probability(computedRelLevel, cdfFlag, mostProbPointU, fnGradU, fnHessU);
+    computedGenRelLevels[respFnCount][levelCount] = computedGenRelLevel =
+      reliability(computed_prob_level);
+  }
+
+  // Final statistic gradients are dz/ds, dbeta/ds, or dp/ds
+  const ShortArray& final_asv = finalStatistics.active_set_request_vector();
+  bool system_grad_contrib = false;
+  // a system-level statistic reduction may also require this z-level gradient
+  if (respLevelTargetReduce &&
+      levelCount < requestedRespLevels[respFnCount].length()) {
+    size_t sys_stat_count = 2*numFunctions + totalLevelRequests + levelCount;
+    if (final_asv[sys_stat_count] & 2)
+      system_grad_contrib = true;
+  }
+  if ( (final_asv[statCount] & 2) || system_grad_contrib ) {
+
+    // evaluate dg/ds at the MPP and store in final_stat_grad
+    RealVector final_stat_grad;
+    dg_ds_eval(mostProbPointX, fnGradX, final_stat_grad);
+
+    // for warm-starting next run
+    if (warmStartFlag && subIteratorFlag && levelCount == 0)
+      Teuchos::setCol(final_stat_grad, respFnCount, prevFnGradDLev0);
+
+    // RIA: sensitivity of beta/p/beta* w.r.t. inactive variables
+    //   dbeta/ds     = 1/norm_grad_u * dg/ds       (first-order)
+    //   dp/ds        = -phi(-beta) * dbeta/ds      (first-order)
+    //   dp_2/ds      = [Phi(-beta_corr)*sum - phi(-beta_corr)*prod] * dbeta/ds
+    //                                              (second-order)
+    //   dbeta*/ds    = -1/phi(-beta*) * dp_2/ds    (second-order)
+    // PMA: sensitivity of g function w.r.t. inactive variables
+    //   dz/ds        = dg/ds
+    if (ria_flag) {
+      // beta_cdf = -beta_ccdf, p_cdf = 1. - p_ccdf
+      // -->> dbeta_cdf/ds = -dbeta_ccdf/ds, dp_cdf/ds = -dp_ccdf/ds
+      Real norm_grad_u = fnGradU.normFrobenius();
+      // factor for first-order dbeta/ds:
+      Real factor = (cdfFlag) ? 1./norm_grad_u : -1./norm_grad_u;
+      if (integrationOrder == 2 && respLevelTarget != RELIABILITIES) {
+	factor *= dp2_dbeta_factor(computedRelLevel, cdfFlag);
+	// factor for second-order dbeta*/ds
+	if (respLevelTarget == GEN_RELIABILITIES)
+	  factor *= -1. / Pecos::phi(-computedGenRelLevel);
+      }
+      else if (respLevelTarget == PROBABILITIES) // factor for first-order dp/ds
+	factor *= -Pecos::phi(-computedRelLevel);
+
+      // apply factor:
+      size_t i, num_final_grad_vars
+	= finalStatistics.active_set_derivative_vector().size();
+      for (i=0; i<num_final_grad_vars; ++i)
+	final_stat_grad[i] *= factor;
+    }
+    finalStatistics.function_gradient(final_stat_grad, statCount);
+  }
+
+  // Update warm-start data and graphics
+  if (warmStartFlag && subIteratorFlag && levelCount == 0) {
+    // for warm-starting next run
+    prevMPPULev0[respFnCount] = mostProbPointU;
+    prevCumASVLev0[respFnCount] |= final_asv[statCount];
+    for (size_t i=0; i<numUncertainVars; i++)
+      prevFnGradULev0(i,respFnCount) = fnGradU[i];
+  }
+  // top-level runs: plot (z, p) and (z, x*) trajectories; a new dataset is
+  // started once the last requested level of this response fn is processed
+  if (!subIteratorFlag) {
+    dakota_graphics.add_datapoint(respFnCount, computedRespLevel,
+				  computed_prob_level);
+    for (size_t i=0; i<numUncertainVars; i++) {
+      dakota_graphics.add_datapoint(numFunctions+i, computedRespLevel,
+				    mostProbPointX[i]);
+      if (numFunctions > 1 && respFnCount < numFunctions-1 &&
+	  levelCount == requestedRespLevels[respFnCount].length() +
+	                requestedProbLevels[respFnCount].length() +
+	                requestedRelLevels[respFnCount].length() +
+	                requestedGenRelLevels[respFnCount].length() - 1)
+	dakota_graphics.new_dataset(numFunctions+i);
+    }
+  }
+}
+
+
+/** Rebuild the local (AMV/AMV^2+) or multipoint (TANA) limit-state
+    approximation anchored at the current MPP estimate.  The anchor point and
+    its derivative data are expressed in x-space or u-space according to
+    mppSearchType. */
+void NonDLocalReliability::update_limit_state_surrogate()
+{
+  // x-space approximations anchor at mostProbPointX, u-space at mostProbPointU
+  bool x_space = (mppSearchType ==  AMV_X || mppSearchType == AMV_PLUS_X ||
+		  mppSearchType == TANA_X);
+
+  // construct local Variables object holding the anchor point
+  Variables mpp_vars(iteratedModel.current_variables().shared_data());
+  if (x_space) mpp_vars.continuous_variables(mostProbPointX);
+  else         mpp_vars.continuous_variables(mostProbPointU);
+
+  // construct Response object with the anchor data: ASV 3 = value + gradient;
+  // ASV 7 adds the Hessian for a second-order Taylor series
+  ShortArray asv(numFunctions, 0);
+  asv[respFnCount] = (taylorOrder == 2) ? 7 : 3;
+  ActiveSet set;//(numFunctions, numUncertainVars);
+  set.request_vector(asv);
+  set.derivative_vector(iteratedModel.continuous_variable_ids());
+  Response response(SIMULATION_RESPONSE, set);
+  response.function_value(computedRespLevel, respFnCount);
+  if (x_space) {
+    response.function_gradient(fnGradX, respFnCount);
+    if (taylorOrder == 2)
+      response.function_hessian(fnHessX, respFnCount);
+  }
+  else {
+    response.function_gradient(fnGradU, respFnCount);
+    if (taylorOrder == 2)
+      response.function_hessian(fnHessU, respFnCount);
+  }
+  IntResponsePair response_pr(0, response); // dummy eval id
+
+  // After a design variable change, history data (e.g., TANA) needs
+  // to be cleared (build_approximation() only calls clear_current())
+  if (numRelAnalyses && levelCount == 0)
+    uSpaceModel.approximations()[respFnCount].clear_all();
+  // build the new local/multipoint approximation
+  uSpaceModel.build_approximation(mpp_vars, response_pr);
+}
+
+
+/** For second-order PMA with a requested probability or generalized
+    reliability level, refresh the sign convention (pmaMaximizeG) from the
+    second-order CDF reliability index at the current expansion point. */
+void NonDLocalReliability::
+update_pma_maximize(const RealVector& mpp_u, const RealVector& fn_grad_u,
+		    const RealSymMatrix& fn_hess_u)
+{
+  // offsets into the concatenated {z, p, beta, beta*} level requests
+  size_t num_rl       = requestedRespLevels[respFnCount].length(),
+    num_rl_pl    = num_rl + requestedProbLevels[respFnCount].length(),
+    num_rl_pl_bl = num_rl_pl + requestedRelLevels[respFnCount].length();
+
+  bool have_p_cdf = true;
+  Real cdf_prob = 0.;
+  if (levelCount < num_rl_pl) {            // requested probability level
+    Real req_p = requestedProbLevels[respFnCount][levelCount - num_rl];
+    cdf_prob = (cdfFlag) ? req_p : 1. - req_p;
+  }
+  else if (levelCount >= num_rl_pl_bl) {   // requested generalized beta level
+    Real req_gen_beta
+      = requestedGenRelLevels[respFnCount][levelCount - num_rl_pl_bl];
+    cdf_prob = probability((cdfFlag) ? req_gen_beta : -req_gen_beta);
+  }
+  else                                     // std beta level: no update needed
+    have_p_cdf = false;
+
+  if (have_p_cdf) {
+    // second-order CDF reliability index at the current point; its sign
+    // determines whether PMA should maximize or minimize G
+    Real cdf_beta = reliability(cdf_prob, true, mpp_u, fn_grad_u, fn_hess_u);
+    pmaMaximizeG = (cdf_beta < 0.);
+  }
+}
+
+
+/** Seed the MPP search data with mean-value statistics: expansion point at
+    the variable means plus the response value and x-/u-space derivatives
+    evaluated there. */
+void NonDLocalReliability::assign_mean_data()
+{
+  const Pecos::RealVector& means_x = natafTransform.x_means();
+
+  // expansion point = means in both spaces
+  mostProbPointX = means_x;
+  mostProbPointU = ranVarMeansU;
+
+  // mean-value response and x-space gradient for this response function
+  computedRespLevel = fnValsMeanX(respFnCount);
+  for (size_t v=0; v<numUncertainVars; ++v)
+    fnGradX[v] = fnGradsMeanX(v,respFnCount);
+
+  // transform dg/dx -> dg/du at the means
+  SizetMultiArrayConstView cv_ids = iteratedModel.continuous_variable_ids();
+  SizetArray x_dvv;
+  copy_data(cv_ids, x_dvv);
+  natafTransform.trans_grad_X_to_U(fnGradX, fnGradU, means_x, x_dvv, cv_ids);
+
+  // second-order data at the means (skipped for quasi-Hessians, which have
+  // not yet accumulated any updates at this point)
+  if (taylorOrder == 2 && iteratedModel.hessian_type() != "quasi") {
+    fnHessX = fnHessiansMeanX[respFnCount];
+    natafTransform.trans_hess_X_to_U(fnHessX, fnHessU, means_x, fnGradX,
+				     x_dvv, cv_ids);
+    curvatureDataAvailable = true;
+    kappaUpdated = false;
+  }
+}
+
+
+/** This function recasts a G(u) response set (already transformed and
+    approximated in other recursions) into an RIA objective function. */
+void NonDLocalReliability::
+RIA_objective_eval(const Variables& sub_model_vars,
+		   const Variables& recast_vars,
+		   const Response& sub_model_response,
+		   Response& recast_response)
+{
+  // RIA objective: f(u) = u'u (squared distance from the origin).  The
+  // squared form is used because f = ||u|| has an undefined gradient at
+  // u = 0 (a 0-gradient workaround would be needed to keep the optimizer
+  // happy), whereas grad(u'u) = 2u is well defined everywhere.
+
+  short request = recast_response.active_set_request_vector()[0];
+  const RealVector& u_vals = recast_vars.continuous_variables();
+  size_t j, num_u = u_vals.length();
+
+  if (request & 1) {            // f = sum_j u_j^2
+    Real sum_sq = 0.;
+    for (j=0; j<num_u; ++j)
+      sum_sq += u_vals[j] * u_vals[j];
+    recast_response.function_value(sum_sq, 0);
+  }
+  if (request & 2) {            // grad f = 2u
+    RealVector obj_grad = recast_response.function_gradient_view(0);
+    for (j=0; j<num_u; ++j)
+      obj_grad[j] = 2.*u_vals[j];
+  }
+  if (request & 4) {            // hess f = 2I
+    RealSymMatrix obj_hess = recast_response.function_hessian_view(0);
+    obj_hess = 0.;
+    for (j=0; j<num_u; ++j)
+      obj_hess(j,j) = 2.;
+  }
+}
+
+
+/** This function recasts a G(u) response set (already transformed and
+    approximated in other recursions) into an RIA equality constraint. */
+void NonDLocalReliability::
+RIA_constraint_eval(const Variables& sub_model_vars,
+		    const Variables& recast_vars,
+		    const Response& sub_model_response,
+		    Response& recast_response)
+{
+  // RIA equality constraint: c(u) = G(u) - z-bar = 0
+
+  short request  = recast_response.active_set_request_vector()[1];
+  int   fn_index = nondLocRelInstance->respFnCount;
+
+  if (request & 1) { // shift G by the requested response level
+    Real g_val = sub_model_response.function_value(fn_index);
+    recast_response.function_value(
+      g_val - nondLocRelInstance->requestedTargetLevel, 1);
+  }
+  // the shift is constant, so dG/du and d^2G/du^2 pass through unchanged
+  if (request & 2)
+    recast_response.function_gradient(
+      sub_model_response.function_gradient_view(fn_index), 1);
+  if (request & 4)
+    recast_response.function_hessian(
+      sub_model_response.function_hessian(fn_index), 1);
+}
+
+
+/** This function recasts a G(u) response set (already transformed and
+    approximated in other recursions) into a PMA objective function. */
+void NonDLocalReliability::
+PMA_objective_eval(const Variables& sub_model_vars,
+		   const Variables& recast_vars,
+		   const Response& sub_model_response,
+		   Response& recast_response)
+{
+  // ----------------------------------
+  // The PMA objective function is G(u)
+  // ----------------------------------
+
+  int   resp_fn    = nondLocRelInstance->respFnCount;
+  short sm_asv_val = sub_model_response.active_set_request_vector()[resp_fn];
+  Real fn; RealVector fn_grad_u; RealSymMatrix fn_hess_u;
+  if (sm_asv_val & 2)
+    fn_grad_u = sub_model_response.function_gradient_view(resp_fn);
+  if (sm_asv_val & 4)
+    fn_hess_u = sub_model_response.function_hessian_view(resp_fn);
+
+  // Due to RecastModel, objective_eval always called before constraint_eval,
+  // so perform NO_APPROX updates here.
+  if (nondLocRelInstance->mppSearchType == NO_APPROX &&
+      nondLocRelInstance->integrationOrder == 2) {
+    nondLocRelInstance->curvatureDataAvailable = true;
+    nondLocRelInstance->kappaUpdated = false; // new fn_{grad,hess}_u data
+    nondLocRelInstance->update_pma_maximize(recast_vars.continuous_variables(),
+					    fn_grad_u, fn_hess_u);
+  }
+
+  // When pmaMaximizeG is set, G is to be maximized: negate value, gradient,
+  // and Hessian so the minimizer can be used unchanged.
+  short asv_val = recast_response.active_set_request_vector()[0];
+  bool  pma_max = nondLocRelInstance->pmaMaximizeG;
+  if (asv_val & 1) {
+    fn = sub_model_response.function_value(resp_fn);
+    if (pma_max) recast_response.function_value(-fn, 0);
+    else         recast_response.function_value( fn, 0);
+  }
+  if (asv_val & 2) { // dG/du: no additional transformation needed
+    if (pma_max) {
+      RealVector recast_grad = recast_response.function_gradient_view(0);
+      size_t i, num_vars = fn_grad_u.length();
+      for (i=0; i<num_vars; ++i)
+	recast_grad[i] = -fn_grad_u[i];
+    }
+    else
+      recast_response.function_gradient(fn_grad_u, 0);
+  }
+  if (asv_val & 4) { // d^2G/du^2: no additional transformation needed
+    if (pma_max) {
+      // symmetric matrix: only the lower triangle needs to be negated
+      RealSymMatrix recast_hess	= recast_response.function_hessian_view(0);
+      size_t i, j, num_vars = fn_hess_u.numRows();
+      for (i=0; i<num_vars; ++i)
+	for (j=0; j<=i; ++j)
+	  recast_hess(i,j) = -fn_hess_u(i,j);
+    }
+    else
+      recast_response.function_hessian(fn_hess_u, 0);
+  }
+
+#ifdef DEBUG
+  if (asv_val & 1)
+    Cout << "PMA_objective_eval(): sub-model function = " << fn << std::endl;
+  if (asv_val & 2) { // dG/du: no additional transformation needed
+    Cout << "PMA_objective_eval(): sub-model gradient:\n";
+    write_data(Cout, fn_grad_u);
+  }
+  if (asv_val & 4) { // d^2G/du^2: no additional transformation needed
+    Cout << "PMA_objective_eval(): sub-model Hessian:\n";
+    write_data(Cout, fn_hess_u, true, true, true);
+  }
+#endif // DEBUG
+}
+
+
+/** This function recasts a G(u) response set (already transformed and
+    approximated in other recursions) into a first-order PMA equality
+    constraint on reliability index beta. */
+void NonDLocalReliability::
+PMA_constraint_eval(const Variables& sub_model_vars,
+		    const Variables& recast_vars,
+		    const Response& sub_model_response,
+		    Response& recast_response)
+{
+  // First-order PMA equality constraint: c(u) = u'u - beta-bar^2 = 0.
+  // The squared form avoids the gradient singularity of ||u|| at the origin.
+
+  short request = recast_response.active_set_request_vector()[1];
+  const RealVector& u_vals = recast_vars.continuous_variables();
+  size_t j, num_u = u_vals.length();
+
+  if (request & 1) {
+    Real sum_sq = 0.;              // u'u = beta^2
+    for (j=0; j<num_u; ++j)
+      sum_sq += u_vals[j] * u_vals[j];
+    Real beta_bar = nondLocRelInstance->requestedTargetLevel;
+    recast_response.function_value(sum_sq - beta_bar*beta_bar, 1);
+  }
+  if (request & 2) {               // grad c = 2u
+    RealVector con_grad = recast_response.function_gradient_view(1);
+    for (j=0; j<num_u; ++j)
+      con_grad[j] = 2.*u_vals[j];
+  }
+  if (request & 4) {               // hess c = 2I
+    RealSymMatrix con_hess = recast_response.function_hessian_view(1);
+    con_hess = 0.;
+    for (j=0; j<num_u; ++j)
+      con_hess(j,j) = 2.;
+  }
+}
+
+
+/** This function recasts a G(u) response set (already transformed and
+    approximated in other recursions) into a second-order PMA equality
+    constraint on generalized reliability index beta-star. */
+void NonDLocalReliability::
+PMA2_constraint_eval(const Variables& sub_model_vars,
+		     const Variables& recast_vars,
+		     const Response& sub_model_response,
+		     Response& recast_response)
+{
+  // -----------------------------------------------------
+  // The PMA SORM equality constraint is beta* = beta*-bar
+  // -----------------------------------------------------
+
+  const RealVector& u = recast_vars.continuous_variables();
+  short    asv_val = recast_response.active_set_request_vector()[1];
+  int      resp_fn = nondLocRelInstance->respFnCount;
+  bool         cdf = nondLocRelInstance->cdfFlag;
+
+  // Calculate beta --> p --> beta*.  Use up-to-date mpp/grad/Hessian info,
+  // including surrogate-based data, within signed_norm(), but disallow
+  // surrogate-based curvature corrections due to their sensitivity.  The
+  // presence of fn_grad_u/fn_hess_u data is enforced in PMA2_set_mapping().
+  RealVector fn_grad_u = sub_model_response.function_gradient_view(resp_fn);
+  Real comp_rel = nondLocRelInstance->computedRelLevel =
+    nondLocRelInstance->signed_norm(u, fn_grad_u, cdf);
+  // Don't update curvature correction when nonlinear transformation
+  // induces additional curvature on top of a low-order approximation.
+  // Note: if linear transformation or u-space AMV^2+, then Hessian is
+  // consistent with the previous truth and is constant over the surrogate.
+  Real computed_prob_level = (nondLocRelInstance->mppSearchType == NO_APPROX) ?
+    nondLocRelInstance->probability(comp_rel, cdf, u, fn_grad_u,
+      sub_model_response.function_hessian(resp_fn)) :
+    nondLocRelInstance->probability(comp_rel, cdf,
+      nondLocRelInstance->mostProbPointU, nondLocRelInstance->fnGradU,
+      nondLocRelInstance->fnHessU);
+  Real comp_gen_rel = nondLocRelInstance->computedGenRelLevel =
+    nondLocRelInstance->reliability(computed_prob_level);
+
+  if (asv_val & 1) { // calculate the equality constraint: beta* - bar-beta*
+    Real c = comp_gen_rel - nondLocRelInstance->requestedTargetLevel;
+#ifdef DEBUG
+    Cout << "In PMA2_constraint_eval, beta* = " << comp_gen_rel
+	 << " bar-beta* = " << nondLocRelInstance->requestedTargetLevel
+	 << " eq constr = " << c << std::endl;
+#endif
+    recast_response.function_value(c, 1);
+  }
+  if (asv_val & 2) {
+    // Note: for second-order integrations, beta* is a function of p and
+    // kappa(u).  dbeta*/du involves dkappa/du, but these terms are neglected
+    // as in dbeta*/ds (design sensitivities).
+    //   dbeta/du_i  = u_i/beta (in factor below)
+    //   dp_2/du_i   = [Phi(-beta_corr)*sum - phi(-beta_corr)*prod] * dbeta/du_i
+    //                 (term in brackets computed in dp2_dbeta_factor())
+    //   dbeta*/du_i = -1/phi(-beta*) * dp_2/du (in factor below)
+    Real factor = -nondLocRelInstance->dp2_dbeta_factor(comp_rel, cdf)
+                / comp_rel / Pecos::phi(-comp_gen_rel);
+    size_t i, num_vars = u.length();
+    RealVector grad_f = recast_response.function_gradient_view(1);
+    for (i=0; i<num_vars; ++i)
+      grad_f[i] = factor * u[i];
+#ifdef DEBUG
+    Cout << "In PMA2_constraint_eval, gradient of beta*:\n";
+    write_data(Cout, grad_f);
+#endif
+  }
+  if (asv_val & 4) {
+    Cerr << "Error: Hessian data not supported in NonDLocalReliability::"
+	 << "PMA2_constraint_eval()" << std::endl;
+    abort_handler(-1);
+    /*
+    RealSymMatrix hess_f = recast_response.function_hessian_view(1);
+    hess_f = 0.;
+    for (i=0; i<num_vars; i++)
+      hess_f(i,i) = ;
+    */
+  }
+}
+
+
+/** Augments the sub-model active-set request so that the second-order PMA
+    constraint (beta*) can be recomputed: a constraint value or gradient
+    request requires up-to-date sub-model derivative data. */
+void NonDLocalReliability::
+PMA2_set_mapping(const Variables& recast_vars, const ActiveSet& recast_set,
+		 ActiveSet& sub_model_set)
+{
+  short con_request = recast_set.request_vector()[1];
+  if (con_request & 3) { // value/grad request share beta-->p-->beta* calcs
+    int   fn_index   = nondLocRelInstance->respFnCount;
+    short sm_request = sub_model_set.request_value(fn_index);
+
+    // always need the latest fn_grad_u (either truth- or approx-based)
+    sm_request |= 2;
+    // FORM/SORM (NO_APPROX) additionally needs the latest truth fn_hess_u;
+    // otherwise value/grad requests reuse the most recent truth
+    // fnGradU/fnHessU data
+    if (nondLocRelInstance->mppSearchType == NO_APPROX)
+      sm_request |= 4;
+
+    sub_model_set.request_value(sm_request, fn_index);
+  }
+}
+
+
+/** Computes dg/ds where s = design variables.  Supports potentially
+    overlapping cases of design variable augmentation and insertion. */
+void NonDLocalReliability::
+dg_ds_eval(const RealVector& x_vars, const RealVector& fn_grad_x,
+	   RealVector& final_stat_grad)
+{
+  const SizetArray& final_dvv = finalStatistics.active_set_derivative_vector();
+  size_t i, num_final_grad_vars = final_dvv.size();
+  if (final_stat_grad.empty())
+    final_stat_grad.resize(num_final_grad_vars);
+
+  // For design vars that are distribution parameters of the uncertain vars,
+  // dg/ds = dg/dx * dx/ds where dx/ds is the design Jacobian.  Since dg/dx is
+  // already available (passed in as fn_grad_x), these sensitivities do not
+  // require additional response evaluations.
+  bool dist_param_deriv = false;
+  size_t num_outer_cv = secondaryACVarMapTargets.size();
+  // any non-NO_TARGET entry means at least one design var is inserted as a
+  // distribution parameter of the uncertain variables
+  for (i=0; i<num_outer_cv; i++)
+    if (secondaryACVarMapTargets[i] != Pecos::NO_TARGET) // dist param insertion
+      { dist_param_deriv = true; break; }
+  if (dist_param_deriv) {
+    SizetMultiArrayConstView cv_ids = iteratedModel.continuous_variable_ids();
+    SizetArray x_dvv; copy_data(cv_ids, x_dvv);
+    SizetMultiArrayConstView acv_ids
+      = iteratedModel.all_continuous_variable_ids();
+    RealVector fn_grad_s(num_final_grad_vars, false);
+    // chain rule through the Nataf transformation: dg/ds = dg/dx * dx/ds
+    natafTransform.trans_grad_X_to_S(fn_grad_x, fn_grad_s, x_vars, x_dvv,
+				     cv_ids, acv_ids, primaryACVarMapIndices,
+				     secondaryACVarMapTargets);
+    final_stat_grad = fn_grad_s;
+  }
+
+  // For design vars that are separate from the uncertain vars, perform a new
+  // fn eval for dg/ds, where s = inactive/design vars.  This eval must be
+  // performed at (s, x_vars) for each response fn for each level as
+  // required by final_asv.  RBDO typically specifies one level for 1 or
+  // more limit states, so the number of additional evals will usually be small.
+  // Condition: no insertion map at all, or some NO_TARGET entries remain
+  // (i.e., at least one design var is augmented rather than inserted).
+  if (secondaryACVarMapTargets.empty() ||
+      contains(secondaryACVarMapTargets, Pecos::NO_TARGET)) {
+    Cout << "\n>>>>> Evaluating sensitivity with respect to augmented inactive "
+	 << "variables\n";
+    // an approximation-based MPP search is active: route the new evaluation
+    // to the underlying truth model rather than the surrogate
+    if (mppSearchType && mppSearchType < NO_APPROX)
+      uSpaceModel.component_parallel_mode(TRUTH_MODEL);
+    iteratedModel.continuous_variables(x_vars);
+    ActiveSet inactive_grad_set = activeSet;
+    inactive_grad_set.request_values(0);
+    inactive_grad_set.request_value(2, respFnCount);
+    // final_dvv is mapped from the top-level DVV in NestedModel::set_mapping()
+    // and includes augmented and inserted variable ids.  Since we only want the
+    // augmented ids in this case, the UQ-level inactive ids are sufficient.
+    SizetMultiArrayConstView icv_ids
+      = iteratedModel.inactive_continuous_variable_ids();
+    inactive_grad_set.derivative_vector(icv_ids);
+    /* More rigorous with reqd deriv vars, but equivalent in practice:
+    // Filter final_dvv to contain only inactive continuous variable ids:
+    SizetArray filtered_final_dvv;
+    for (i=0; i<num_final_grad_vars; i++) {
+      size_t final_dvv_i = final_dvv[i];
+      if (contains(icv_ids, final_dvv_i))
+	filtered_final_dvv.push_back(final_dvv_i);
+    }
+    inactive_grad_set.derivative_vector(filtered_final_dvv);
+    */
+    iteratedModel.compute_response(inactive_grad_set);
+    const Response& curr_resp = iteratedModel.current_response();
+    if (secondaryACVarMapTargets.empty())
+      final_stat_grad = curr_resp.function_gradient_copy(respFnCount);
+    else {
+      // overlapping case: fill only the augmented (NO_TARGET) slots, leaving
+      // the inserted slots populated by the chain-rule result above
+      const RealMatrix& fn_grads = curr_resp.function_gradients();
+      size_t cntr = 0;
+      for (i=0; i<num_final_grad_vars; i++)
+	if (secondaryACVarMapTargets[i] == Pecos::NO_TARGET)
+	  final_stat_grad[i] = fn_grads(cntr++, respFnCount);
+    }
+  }
+}
+
+
+/** Assign a sign to ||u*||.  If <u*, grad G(u*)> > 0 then G increases along
+    the u* direction, so G(u*) > G(0) and the CDF reliability index is
+    negative; otherwise it is positive.  This inner-product test (suggested
+    by Barron Bichon) avoids evaluating the median response value
+    (medianFnVals) that a direct z-vs-median comparison would require. */
+Real NonDLocalReliability::
+signed_norm(Real norm_mpp_u, const RealVector& mpp_u,
+	    const RealVector& fn_grad_u, bool cdf_flag)
+{
+  bool g_increasing = (mpp_u.dot(fn_grad_u) > 0.);
+#ifdef DEBUG
+  Cout << "\nSign of <mppU, fnGradU> is ";
+  Cout << ((g_increasing) ? " 1.\n\n" : "-1.\n\n");
+#endif
+  Real cdf_beta = (g_increasing) ? -norm_mpp_u : norm_mpp_u;
+  // CCDF reliability index is the negation of the CDF index
+  return (cdf_flag) ? cdf_beta : -cdf_beta;
+}
+
+
+/** Converts beta into a probability using either first-order (FORM) or
+    second-order (SORM) integration.  The SORM calculation first calculates
+    the principal curvatures at the MPP (using the approach in Ch. 8 of
+    Haldar & Mahadevan), and then applies correction formulations from the
+    literature (Breitung, Hohenbichler-Rackwitz, or Hong).  An optional
+    importance-sampling refinement (integrationRefinement) may further
+    update the estimate. */
+Real NonDLocalReliability::
+probability(Real beta, bool cdf_flag, const RealVector& mpp_u,
+	    const RealVector& fn_grad_u, const RealSymMatrix& fn_hess_u)
+{
+  Real p = probability(beta); // FORM approximation
+  int wpp7; // output field width; set/used only when outputLevel > NORMAL_OUTPUT
+  if (outputLevel > NORMAL_OUTPUT) {
+    wpp7 = write_precision+7;
+    Cout << "Probability:"// << " beta = " << beta
+	 << " first-order = " << std::setw(wpp7) << p;
+  }
+
+  if (integrationOrder == 2 && curvatureDataAvailable) {
+
+    // lazily (re)compute the principal curvatures at the current MPP
+    if (!kappaUpdated) {
+      principal_curvatures(mpp_u, fn_grad_u, fn_hess_u, kappaU);
+      kappaUpdated = true;
+    }
+
+    // The correction to p is applied for beta >= 0 (FORM p <= 0.5).
+    // For beta < 0, apply correction to complementary problem (Tvedt 1990).
+    //   > beta changes sign
+    //   > p becomes complement
+    //   > principal curvature sign convention defined for CDF beta > 0
+    //     (negate for CDF beta < 0 or CCDF beta > 0, OK for CCDF beta < 0)
+    Real beta_corr = std::abs(beta);
+    Real p_corr    = (beta >= 0.) ? p : 1. - p;
+    RealVector kappa; scale_curvature(beta, cdf_flag, kappaU, kappa);
+
+    // Test for numerical exceptions in sqrt.  Problematic kappa are large and
+    // negative (kterm is always positive).  Skipping individual kappa means
+    // neglecting a primary curvature and including secondary curvatures, which
+    // may be counter-productive (potentially less accurate than FORM).
+    // Therefore, the entire correction is skipped if any curvature is
+    // problematic.  A consistent approach must be used in reliability().
+    bool apply_correction = true;
+    Real psi_m_beta; // psi(-beta) = phi(-beta)/Phi(-beta); unused for Breitung
+    if (secondOrderIntType != BREITUNG)
+      psi_m_beta = Pecos::phi(-beta_corr) / Pecos::Phi(-beta_corr);
+    Real kterm = (secondOrderIntType == BREITUNG) ? beta_corr : psi_m_beta;
+    int i, num_kappa = numUncertainVars - 1;
+    for (i=0; i<num_kappa; i++) {
+      //Cout << "1 + kterm*kappa = " << 1. + kterm * kappa[i] << std::endl;
+      // Numerical exception happens for 1+ktk <= 0., but inaccuracy can happen
+      // earlier.  Empirical evidence to date suggests a threshold of 0.5
+      // (1/std::sqrt(0.5) = 1.414 multiplier = 41.4% increase in p.
+      if (1. + kterm * kappa[i] <= curvatureThresh)
+	apply_correction = false;
+    }
+
+    if (apply_correction) {
+      // compute SORM estimate (Breitung, Hohenbichler-Rackwitz, or Hong).
+      Real C1 = 0., ktk;
+      for (i=0; i<num_kappa; i++) {
+	// Breitung 1984:              p_corr /= std::sqrt(1+beta_corr*kappa)
+	// Hohenbichler-Rackwitz 1988: p_corr /= std::sqrt(1+psi_m_beta*kappa)
+	// > Note that psi(-beta) -> beta as beta increases: HR -> Breitung
+	// Hong 1999, P3 formulation:  p_corr =  C1 * p_HR
+	ktk = kterm * kappa[i];
+	p_corr /= std::sqrt(1. + ktk);
+	if (secondOrderIntType == HONG) {
+	  Real hterm = num_kappa * kappa[i] / 2. / (1. + ktk);
+	  C1 += Pecos::Phi(-beta_corr-hterm) / Pecos::Phi(-beta_corr)
+	     *  exp(psi_m_beta*hterm);
+	}
+      }
+      if (secondOrderIntType == HONG) {
+	C1 /= num_kappa; // average the accumulated Hong terms
+	p_corr *= C1;
+      }
+      if (p_corr >= 0. && p_corr <= 1.) { // verify p_corr within valid range
+	p = (beta >= 0.) ? p_corr : 1. - p_corr; // undo complement if beta < 0
+	if (outputLevel > NORMAL_OUTPUT)
+	  Cout << " second-order = " << std::setw(wpp7) << p;
+      }
+      else {
+	Cerr << "\nWarning: second-order probability integration bypassed due "
+	     << "to numerical issues (corrected p outside [0,1]).\n";
+	warningBits |= 2; // second warning in output summary
+      }
+    }
+    else {
+      Cerr << "\nWarning: second-order probability integration bypassed due "
+	   << "to numerical issues (curvature threshold exceeded).\n";
+      warningBits |= 2; // second warning in output summary
+    }
+  }
+
+  if (integrationRefinement) { // IS/AIS/MMAIS
+    // rep needed for access to functions not mapped to Iterator level
+    NonDAdaptImpSampling* importance_sampler_rep
+      = (NonDAdaptImpSampling*)importanceSampler.iterator_rep();
+    bool x_data_flag = false; // mpp_u is u-space data
+    importance_sampler_rep->
+      initialize(mpp_u, x_data_flag, respFnCount, p, requestedTargetLevel);
+    ParLevLIter pl_iter = methodPCIter->mi_parallel_level_iterator(miPLIndex);
+    importanceSampler.run(pl_iter);
+    p = importance_sampler_rep->final_probability();
+    if (outputLevel > NORMAL_OUTPUT)
+      Cout << " refined = " << std::setw(wpp7) << p;
+  }
+  if (outputLevel > NORMAL_OUTPUT)
+    Cout << '\n';
+#ifdef DEBUG
+  if (integrationOrder == 2 && curvatureDataAvailable)
+    { Cout << "In probability(), kappaU:\n"; write_data(Cout, kappaU); }
+#endif
+
+  return p;
+}
+
+
+/** Compute sensitivity of second-order probability w.r.t. beta for use
+    in derivatives of p_2 or beta* w.r.t. auxiliary parameters s (design,
+    epistemic) or derivatives of beta* w.r.t. u in PMA2_constraint_eval().
+    Falls back to the first-order factor -phi(-beta) when no curvature
+    data is available or a numerical exception is detected. */
+Real NonDLocalReliability::dp2_dbeta_factor(Real beta, bool cdf_flag)
+{
+  //   dp/ds     = -phi(-beta) * dbeta/ds
+  //               (fall back to first-order, if needed)
+  //   dp_2/ds   = [Phi(-beta_corr)*sum - phi(-beta_corr)*prod] * dbeta/ds
+  //               (this function computes term in brackets)
+  //   dbeta*/ds = -1/phi(-beta*) * dp_2/ds    (second-order)
+
+  // For beta < 0, beta_corr = -beta and p_corr = 1 - p:
+  // dp/ds = -dp_corr/ds = -(dp_corr/dbeta_corr * dbeta_corr/ds)
+  //       = -(dp_corr/dbeta_corr * -dbeta/ds)
+  //       = dp_corr/dbeta_corr * dbeta/ds
+
+  bool apply_correction; size_t i, j, num_kappa;
+  Real kterm, dpsi_m_beta_dbeta, beta_corr; RealVector kappa;
+  if (curvatureDataAvailable) {
+
+    //if (!kappaUpdated) { // should already be up to date
+    //  principal_curvatures(mpp_u, fn_grad_u, fn_hess_u, kappaU);
+    //  kappaUpdated = true;
+    //}
+
+    scale_curvature(beta, cdf_flag, kappaU, kappa);
+    beta_corr = (beta >= 0.) ? beta : -beta;
+
+    // kterm (and for HR, dpsi_m_beta_dbeta) depend on the SORM variant
+    switch (secondOrderIntType) {
+    case HONG: // additional complexity not warranted
+      Cerr << "\nError: final statistic gradients not implemented for Hong."
+	   << std::endl;
+      abort_handler(-1); break; // abort_handler is expected not to return
+    case BREITUNG:
+      kterm = beta_corr; break;
+    case HOHENRACK:
+      // Psi(beta) = phi(beta) / Phi(beta)
+      // dPsi/dbeta = (Phi dphi/dbeta - phi^2)/Phi^2
+      //   where dphi/dbeta = -beta phi
+      // dPsi/dbeta = -phi (beta Phi + phi) / Phi^2
+      //            = -Psi (beta + Psi)
+      // --> dPsi/dbeta(-beta_corr)
+      //       = -Psi(-beta_corr) (-beta_corr + Psi(-beta_corr) )
+      //       =  Psi(-beta_corr) ( beta_corr - Psi(-beta_corr) )
+      kterm = Pecos::phi(-beta_corr) / Pecos::Phi(-beta_corr); // psi_m_beta
+      dpsi_m_beta_dbeta = kterm*(beta_corr - kterm); // orig (kterm + beta_corr)
+      break;
+    }
+
+    // same curvature safeguard as in probability() for consistency
+    num_kappa = numUncertainVars - 1;
+    apply_correction = true;
+    for (i=0; i<num_kappa; ++i)
+      if (1. + kterm * kappa[i] <= curvatureThresh)
+	apply_correction = false;
+
+    if (apply_correction) {
+      // sum  accumulates -d(prod)/dbeta via the product rule (prod1 terms)
+      // prod2 is the SORM product correction itself
+      Real sum = 0., ktk, prod1, prod2 = 1.;
+      for (i=0; i<num_kappa; ++i) {
+	ktk = kterm * kappa[i];
+	prod2 /= std::sqrt(1. + ktk);
+	prod1 = 1.;
+	for (j=0; j<num_kappa; ++j)
+	  if (j != i)
+	    prod1 /= std::sqrt(1. + kterm * kappa[j]);
+	prod1 *= kappa[i] / 2. / std::pow(1. + ktk, 1.5);
+	if (secondOrderIntType != BREITUNG)
+	  prod1 *= dpsi_m_beta_dbeta; // chain rule through psi(-beta)
+	sum -= prod1;
+      }
+
+      // verify p_corr within (0,1) for consistency with probability()
+      Real p1_corr = probability(beta_corr), p2_corr = p1_corr * prod2;
+      if (p2_corr >= 0. && p2_corr <= 1.) // factor for second-order dp/ds:
+	return p1_corr * sum - Pecos::phi(-beta_corr) * prod2;
+    }
+
+    // if not returned, then there was an exception
+    Cerr << "\nWarning: second-order probability sensitivity bypassed.\n";
+    warningBits |= 2; // second warning in output summary
+  }
+
+  // first-order (FORM) sensitivity factor
+  return -Pecos::phi(-beta);
+}
+
+
+/** Converts a probability into a reliability using the inverse of the
+    first-order or second-order integrations implemented in
+    NonDLocalReliability::probability().  The second-order inversion has
+    no closed form, so a safeguarded Newton iteration with backtracking
+    is used, starting from the FORM value. */
+Real NonDLocalReliability::
+reliability(Real p, bool cdf_flag, const RealVector& mpp_u,
+	    const RealVector& fn_grad_u, const RealSymMatrix& fn_hess_u)
+{
+  Real beta = reliability(p); // FORM approximation
+
+  if (integrationOrder == 2 && curvatureDataAvailable) {
+
+    // lazily (re)compute the principal curvatures at the current MPP
+    if (!kappaUpdated) {
+      principal_curvatures(mpp_u, fn_grad_u, fn_hess_u, kappaU);
+      kappaUpdated = true;
+    }
+
+    // NOTE: these conversions are currently done once.  It may be necessary
+    // to redo them for each beta estimate (when inverting near beta = zero).
+    Real beta_corr = (beta >= 0.) ? beta : -beta;
+    Real p_corr    = (beta >= 0.) ? p    : 1. - p;
+    RealVector kappa; scale_curvature(beta, cdf_flag, kappaU, kappa);
+
+    // SORM correction to FORM: direct inversion of the SORM formulas is
+    // infeasible due to the multiple instances of beta on the RHS, even for
+    // the simplest case (Breitung).  Therefore, use Newton's method to solve
+    // for beta(p) using Phi_inverse() as an initial guess.
+    // > Newton's method uses reliability_residual() to compute the residual f
+    //   and reliability_residual_derivative() to compute df/dbeta.
+    // > Newton step is then beta -= f(beta)/[df/dbeta(beta)].
+    // > Other options include using an inexact df/dbeta = phi(-beta) from
+    //   FORM or using a quasi-Newton (Broyden update) or FD Newton approach.
+
+    // evaluate residual (terminate is set on a curvature exception)
+    Real res;
+    bool terminate = reliability_residual(p_corr, beta_corr, kappa, res);
+
+    size_t newton_iters = 0, max_iters = 20; // usually converges in ~3 iters
+    bool converged = false;
+    while (!terminate && !converged) {
+
+      // evaluate derivative of residual w.r.t. beta
+      Real dres_dbeta
+	= reliability_residual_derivative(p_corr, beta_corr, kappa);
+
+      // compute Newton step
+      Real delta_beta;
+      if (std::fabs(dres_dbeta) > DBL_MIN) {
+	delta_beta = -res/dres_dbeta; // full Newton step
+	// assess convergence using delta_beta, rather than residual,
+	// since this should be better scaled.
+	if (std::fabs(delta_beta) < convergenceTol)
+	  converged = true; // but go ahead and take the step, if beneficial
+      }
+      else
+	terminate = true; // derivative too small for a stable Newton step
+
+      // Simple backtracking line search globalization
+      bool reduction = false;
+      size_t backtrack_iters = 0;
+      while (!reduction && !terminate) { // enter loop even if converged
+	Real beta_step = beta_corr + delta_beta;
+
+	// verify that new beta_step doesn't violate safeguards.  If not,
+	// evaluate residual res_step at beta_step.
+	Real res_step;
+	terminate = reliability_residual(p_corr, beta_step, kappa, res_step);
+
+	if (!terminate) {
+	  if ( std::fabs(res_step) < std::fabs(res) ) { // accept step
+	    reduction = true;
+	    beta_corr = beta_step;
+	    res       = res_step;
+	    //Cout << "residual = " << res << " delta = " << delta_beta
+	    //     << " beta = " << beta_corr <<'\n';
+	  }
+	  else if (converged)
+	    terminate = true; // kick out of inner while
+	  else { // backtrack
+	    //Cout << "Backtracking\n";
+	    delta_beta /= 2.; // halve the step
+	    if (backtrack_iters++ >= max_iters) {// backtrack iter must complete
+	      Cerr << "\nWarning: maximum back-tracking iterations exceeded in "
+		   << "second-order reliability inversion.\n";
+	      warningBits |= 4; // third warning in output summary
+	      terminate = true;
+	    }
+	  }
+	}
+      }
+      if (++newton_iters >= max_iters && !converged) { // Newton iter completed
+	Cerr << "\nWarning: maximum Newton iterations exceeded in second-order "
+	     << "reliability inversion.\n";
+	warningBits |= 8; // fourth warning in output summary
+	terminate = true;
+      }
+    }
+    // restore the original sign convention of beta
+    return (beta >= 0.) ? beta_corr : -beta_corr;
+  }
+  return beta;
+}
+
+
+/** Evaluate the residual res of the SORM relation f(beta, p) = 0 for a
+    prescribed probability p (see formulas in the body).  Returns true
+    if the Newton solve in reliability() should terminate because a
+    curvature safeguard was violated; returns false on success with res
+    populated. */
+bool NonDLocalReliability::
+reliability_residual(const Real& p, const Real& beta,
+		     const RealVector& kappa, Real& res)
+{
+  int i, num_kappa = numUncertainVars - 1;
+
+  // Test for numerical exceptions in sqrt.  Problematic kappa are large and
+  // negative (kterm is always positive).  Skipping individual kappa means
+  // neglecting a primary curvature and including secondary curvatures, which
+  // may be counter-productive (potentially less accurate than FORM).  Since
+  // the Newton solve can be problematic on its own, skip the entire solve in
+  // this case.
+  Real psi_m_beta; // psi(-beta) = phi(-beta)/Phi(-beta); unused for Breitung
+  if (secondOrderIntType != BREITUNG)
+    psi_m_beta = Pecos::phi(-beta) / Pecos::Phi(-beta);
+  Real kterm = (secondOrderIntType == BREITUNG) ? beta : psi_m_beta;
+  for (i=0; i<num_kappa; i++)
+    if (1. + kterm * kappa[i] <= curvatureThresh) {
+      Cerr << "\nWarning: second-order probability integration bypassed due to "
+	   << "numerical issues.\n";
+      warningBits |= 2; // second warning in output summary
+      return true;
+    }
+
+  // evaluate residual of f(beta,p) = 0 where p is a prescribed constant:
+  //   Breitung: f = 0 = p * Prod_i(sqrt(1+beta*kappa)) - Phi(-beta)
+  //   HohRack:  f = 0 = p * Prod_i(sqrt(1+psi(-beta)*kappa)) - Phi(-beta)
+  //   Hong:     f = 0 = p * Prod_i(sqrt(1+psi(-beta)*kappa)) - C1*Phi(-beta)
+  Real prod = 1., ktk, C1 = 0.;
+  for (i=0; i<num_kappa; i++) {
+    ktk = kterm * kappa[i];
+    prod *= std::sqrt(1. + ktk);
+    if (secondOrderIntType == HONG) {
+      Real hterm = num_kappa * kappa[i] / 2. / (1. + ktk);
+      C1 += Pecos::Phi(-beta - hterm) / Pecos::Phi(-beta)
+	 *  exp(psi_m_beta * hterm);
+    }
+  }
+  if (secondOrderIntType == HONG)
+    res = p * prod - C1 * Pecos::Phi(-beta);
+  else
+    res = p * prod - Pecos::Phi(-beta);
+
+  return false;
+}
+
+
+/** Evaluate the derivative of the reliability_residual() expression
+    w.r.t. beta, for use as the Newton-step denominator in
+    reliability().  Not implemented for the Hong formulation (aborts). */
+Real NonDLocalReliability::
+reliability_residual_derivative(const Real& p, const Real& beta,
+				const RealVector& kappa)
+{
+  int i, j, num_kappa = numUncertainVars - 1;
+  Real psi_m_beta, dpsi_m_beta_dbeta; // only needed for non-Breitung variants
+  if (secondOrderIntType != BREITUNG) {
+    psi_m_beta = Pecos::phi(-beta) / Pecos::Phi(-beta);
+    dpsi_m_beta_dbeta = psi_m_beta*(beta + psi_m_beta);
+  }
+
+  // evaluate derivative of residual w.r.t. beta
+  Real prod, dres_dbeta, sum = 0.;
+  Real kterm = (secondOrderIntType == BREITUNG) ? beta : psi_m_beta;
+  for (i=0; i<num_kappa; i++) {
+    // product rule: derivative of the i-th sqrt factor times the others
+    prod = 1.;
+    for (j=0; j<num_kappa; j++)
+      if (j != i)
+	prod *= std::sqrt(1. + kterm*kappa[j]);
+    prod *= kappa[i]/2./std::sqrt(1. + kterm*kappa[i]);
+    if (secondOrderIntType != BREITUNG)
+      prod *= dpsi_m_beta_dbeta; // chain rule through psi(-beta)
+    sum += prod;
+  }
+  if (secondOrderIntType == HONG) { // additional complexity may not be warranted
+    Cerr << "\nError: reliability residual derivative not implemented for Hong."
+	 << std::endl;
+    abort_handler(-1);
+    //dres_dbeta = p * sum + C1 * phi(-beta) - Phi(-beta) * dC1_dbeta;
+  }
+  else
+    dres_dbeta = p * sum + Pecos::phi(-beta);
+
+  return dres_dbeta;
+}
+
+
+/** Compute the num_vars-1 principal curvatures of the limit state at
+    the MPP (Haldar & Mahadevan, Ch. 8): build a rotation R whose last
+    row is the unit limit-state gradient (Gram-Schmidt orthogonalized),
+    form A = (R fn_hess_u R^T)/||fn_grad_u||, drop the last row/column,
+    and return the eigenvalues of the reduced A in kappa_u (via LAPACK
+    SYEV). */
+void NonDLocalReliability::
+principal_curvatures(const RealVector& mpp_u, const RealVector& fn_grad_u,
+		     const RealSymMatrix& fn_hess_u, RealVector& kappa_u)
+{
+  // fn_grad_u, fn_hess_u, and possibly mpp_u (alternate R0 initialization)
+  // must be in synch and be a reasonable approximation to converged MPP data.
+
+  // compute R matrix
+  int i, j, k, num_vars = mpp_u.length(), num_kappa = num_vars - 1;
+  RealMatrix R0(num_vars, num_vars);// init to 0
+  // initialize R0: last row values are direction cosines
+  for (i=0; i<num_kappa; i++)
+    R0(i, i) = 1.;
+  // Haldar & Mahadevan, p.227: last row = unit gradient vector of limit state
+  Real norm_grad_u = fn_grad_u.normFrobenius();
+  if (norm_grad_u > DBL_MIN)
+    for (i=0; i<num_vars; i++)
+      R0(num_kappa, i) = fn_grad_u[i]/norm_grad_u;
+  else { // fallback: try to use +/- mPPU[i]/norm(mPPU)
+    Real norm_mpp_u = mpp_u.normFrobenius(); // unsigned beta
+    if (norm_mpp_u > DBL_MIN) {
+      // Can match the sign of fn_grad_u[i]/norm_grad_u in PMA case (align for
+      // max G, oppose for min G), but can't in general in RIA case (vectors
+      // may align or oppose).  Fortunately, the R0 sign does not appear to
+      // matter since R is applied twice in R fn_hess_u R^T.
+      bool pma_max = (levelCount >= requestedRespLevels[respFnCount].length() &&
+		      pmaMaximizeG) ? true : false;
+      for (i=0; i<num_vars; i++)
+	R0(num_kappa, i) = (pma_max) ?  mpp_u[i]/norm_mpp_u  // aligned
+	                             : -mpp_u[i]/norm_mpp_u; // opposed
+    }
+    else {
+      // Note: for Breitung, kappa_i do not matter if beta = 0.
+      Cerr << "\nError: unable to initialize R0 in principal_curvatures() "
+	   << "calculation." << std::endl;
+      abort_handler(-1);
+    }
+  }
+  //Cout << "\nR0:" << R0;
+
+  // orthogonalize using Gram-Schmidt (bottom-up, keeping last row fixed)
+  RealMatrix R(R0);
+  for (i=num_vars-2; i>=0; i--) {  // update the ith row vector
+    for (j=i+1; j<num_vars; j++) { // orthogonalize to jth row vector
+      Real scale1 = 0., scale2 = 0.;
+      for (k=0; k<num_vars; k++) { // kth column
+	scale1 += R(j, k) * R0(i, k);
+	scale2 += std::pow(R(j, k), 2);
+      }
+      Real scale = scale1 / scale2; // projection coefficient
+      for (k=0; k<num_vars; k++)   // kth column
+	R(i, k) -= scale * R(j, k);
+    }
+    // renormalize ith row vector to unit length
+    Real len = 0.;
+    for (j=0; j<num_vars; j++)
+      len += std::pow(R(i, j), 2);
+    len = std::sqrt(len);
+    for (j=0; j<num_vars; j++)
+      R(i, j) /= len;
+  }
+  //Cout << "\nR:" << R;
+
+  // compute A matrix = (R fn_hess_u R^T)/norm(fn_grad_u)
+  RealSymMatrix A(num_vars, false);
+  Teuchos::symMatTripleProduct(Teuchos::NO_TRANS, 1./norm_grad_u, fn_hess_u,
+			       R, A);
+  //Cout << "\nA:" << A;
+  A.reshape(num_kappa); // upper left portion of matrix
+  //Cout << "\nReshaped A:" << A;
+
+  // compute eigenvalues of A --> principal curvatures
+  Teuchos::LAPACK<int, Real> la;
+  int info, lwork = 3*num_kappa - 1; // SYEV workspace requirement
+  double* work = new double [lwork];
+  // LAPACK eigenvalue solution for real, symmetric A
+  if (kappa_u.length() != num_kappa)
+    kappa_u.sizeUninitialized(num_kappa);
+  la.SYEV('N', A.UPLO(), num_kappa, A.values(), A.stride(), kappa_u.values(),
+	  work, lwork, &info);
+  delete [] work;
+  if (info) {
+    Cerr << "\nError: internal error in LAPACK eigenvalue routine."
+         << std::endl;
+    abort_handler(-1);
+  }
+  //Cout << "\nkappa_u:" << kappa_u;
+}
+
+
+/** Print the final results summary: any accumulated warnings
+    (warningBits), per-response MV statistics (mean, standard deviation,
+    importance factors with sensitivities) when no MPP search was
+    performed, and the computed CDF/CCDF level mappings. */
+void NonDLocalReliability::print_results(std::ostream& s)
+{
+  size_t i, j, width = write_precision+7;
+  StringMultiArrayConstView uv_labels
+    = iteratedModel.continuous_variable_labels();
+  const StringArray& fn_labels = iteratedModel.response_labels();
+  s << "-----------------------------------------------------------------\n";
+
+  // decode the warningBits flags accumulated during the level solves
+  if (warningBits) {
+    s << "Warnings accumulated during solution for one or more levels:\n";
+    if (warningBits & 1)
+      s << "  Maximum number of limit state approximation cycles exceeded.\n";
+    if (warningBits & 2)
+      s << "  Second-order probability integration bypassed due to numerical "
+	<< "issues.\n";
+    if (warningBits & 4)
+      s << "  Maximum back-tracking iterations exceeded in second-order "
+	<< "reliability inversion.\n";
+    if (warningBits & 8)
+      s << "  Maximum Newton iterations exceeded in second-order reliability "
+	<< "inversion.\n";
+    s << "Please interpret results with care.\n";
+    s << "-----------------------------------------------------------------\n";
+  }
+
+  for (i=0; i<numFunctions; i++) {
+
+    // output MV-specific statistics
+    if (!mppSearchType) {
+      s << "MV Statistics for " << fn_labels[i] << ":\n";
+      // approximate response means and std deviations and importance factors
+      Real& std_dev = momentStats(1,i);
+      s << "  Approximate Mean Response                  = " << std::setw(width)
+	<< momentStats(0,i) << "\n  Approximate Standard Deviation of Response"
+	<< " = " << std::setw(width)<< std_dev << '\n';
+      if (natafTransform.x_correlation() || std_dev < Pecos::SMALL_NUMBER)
+	s << "  Importance Factors not available.\n";
+      else
+	for (j=0; j<numUncertainVars; j++)
+	  s << "  Importance Factor for variable "
+	    << std::setiosflags(std::ios::left) << std::setw(11)
+	    << uv_labels[j].data() << " = "
+	    << std::resetiosflags(std::ios::adjustfield)
+	    << std::setw(width) << impFactor(j,i)
+      // Added sensitivity output to importance factors
+      << "  Sensitivity = "
+      << std::resetiosflags(std::ios::adjustfield)
+      << std::setw(width) << fnGradsMeanX(j,i) << '\n';
+    }
+
+    // output CDF/CCDF response/probability pairs
+    size_t num_levels = computedRespLevels[i].length();
+    if (num_levels) {
+      if (!mppSearchType && momentStats(1,i) < Pecos::SMALL_NUMBER)
+        s << "\nWarning: negligible standard deviation renders CDF results "
+          << "suspect.\n\n";
+      if (cdfFlag)
+        s << "Cumulative Distribution Function (CDF) for ";
+      else
+        s << "Complementary Cumulative Distribution Function (CCDF) for ";
+
+      s << fn_labels[i] << ":\n     Response Level  Probability Level  "
+	<< "Reliability Index  General Rel Index\n     --------------  "
+	<< "-----------------  -----------------  -----------------\n";
+      for (j=0; j<num_levels; j++)
+        s << "  " << std::setw(width) << computedRespLevels[i][j]
+	  << "  " << std::setw(width) << computedProbLevels[i][j]
+	  << "  " << std::setw(width) << computedRelLevels[i][j]
+	  << "  " << std::setw(width) << computedGenRelLevels[i][j] << '\n';
+    }
+  }
+
+  //s << "Final statistics:\n" << finalStatistics;
+
+  s << "-----------------------------------------------------------------"
+    << std::endl;
+}
+
+
+/** Fall back from NPSOL to the OPT++ quasi-Newton optimizer for the
+    MPP search when a method conflict is detected; aborts if OPT++ is
+    not available in this build. */
+void NonDLocalReliability::method_recourse()
+{
+  Cerr << "\nWarning: method recourse invoked in NonDLocalReliability due to "
+       << "detected method conflict.\n\n";
+  if (mppSearchType && npsolFlag) {
+#ifdef HAVE_OPTPP
+    mppOptimizer.assign_rep(
+      new SNLLOptimizer("optpp_q_newton", mppModel), false);
+#else
+    Cerr << "\nError: method recourse not possible in NonDLocalReliability "
+	 << "(OPT++ NIP unavailable).\n";
+    abort_handler(-1);
+#endif
+    npsolFlag = false; // record that NPSOL is no longer in use
+  }
+}
+
+} // namespace Dakota
Index: /issm/trunk/externalpackages/dakota/configs/6.2/src/NonDSampling.cpp
===================================================================
--- /issm/trunk/externalpackages/dakota/configs/6.2/src/NonDSampling.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/configs/6.2/src/NonDSampling.cpp	(revision 24686)
@@ -0,0 +1,1389 @@
+/*  _______________________________________________________________________
+
+    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
+    Copyright 2014 Sandia Corporation.
+    This software is distributed under the GNU Lesser General Public License.
+    For more information, see the README file in the top Dakota directory.
+    _______________________________________________________________________ */
+
+//- Class:	 NonDSampling
+//- Description: Implementation code for NonDSampling class
+//- Owner:       Mike Eldred
+//- Checked by:
+//- Version:
+
+#include "dakota_data_types.hpp"
+#include "dakota_system_defs.hpp"
+#include "DakotaModel.hpp"
+#include "DakotaResponse.hpp"
+#include "NonDSampling.hpp"
+#include "ProblemDescDB.hpp"
+#include "SensAnalysisGlobal.hpp"
+#include "pecos_data_types.hpp"
+#include "pecos_stat_util.hpp"
+#include <algorithm>
+
+#include <boost/math/special_functions/fpclassify.hpp>
+
+static const char rcsId[]="@(#) $Id: NonDSampling.cpp 7036 2010-10-22 23:20:24Z mseldre $";
+
+
+namespace Dakota {
+
+
+/** This constructor is called for a standard letter-envelope iterator
+    instantiation.  In this case, set_db_list_nodes has been called and
+    probDescDB can be queried for settings from the method specification. */
+NonDSampling::NonDSampling(ProblemDescDB& problem_db, Model& model):
+  NonD(problem_db, model), seedSpec(probDescDB.get_int("method.random_seed")),
+  randomSeed(seedSpec), samplesSpec(probDescDB.get_int("method.samples")),
+  samplesRef(samplesSpec), numSamples(samplesSpec),
+  rngName(probDescDB.get_string("method.random_number_generator")),
+  sampleType(probDescDB.get_ushort("method.sample_type")),
+  statsFlag(true), allDataFlag(false), samplingVarsMode(ACTIVE),
+  sampleRanksMode(IGNORE_RANKS),
+  varyPattern(!probDescDB.get_bool("method.fixed_seed")), // fixed_seed inverts
+  backfillFlag(probDescDB.get_bool("method.backfill")),
+  numLHSRuns(0)
+{
+  // level mappings are not supported in the presence of epistemic variables
+  if (epistemicStats && totalLevelRequests) {
+    Cerr << "\nError: sampling does not support level requests for "
+	 << "analyses containing epistemic uncertainties." << std::endl;
+    abort_handler(-1);
+  }
+
+  // Since the sampleType is shared with other iterators for other purposes,
+  // its default in DataMethod.cpp is SUBMETHOD_DEFAULT (0).  Enforce an LHS
+  // default here.
+  if (!sampleType)
+    sampleType = SUBMETHOD_LHS;
+
+  // initialize finalStatistics using the default statistics set
+  initialize_final_statistics();
+
+  // update concurrency
+  if (numSamples) // samples is optional (default = 0)
+    maxEvalConcurrency *= numSamples;
+}
+
+
+/** This alternate constructor is used for generation and evaluation
+    of on-the-fly sample sets (no ProblemDescDB queries; all settings
+    are passed in explicitly by the instantiating iterator). */
+NonDSampling::
+NonDSampling(unsigned short method_name, Model& model,
+	     unsigned short sample_type, int samples, int seed,
+	     const String& rng, bool vary_pattern, short sampling_vars_mode):
+  NonD(method_name, model), seedSpec(seed), randomSeed(seed),
+  samplesSpec(samples), samplesRef(samples), numSamples(samples), rngName(rng),
+  sampleType(sample_type), statsFlag(false), allDataFlag(true),
+  samplingVarsMode(sampling_vars_mode), sampleRanksMode(IGNORE_RANKS),
+  varyPattern(vary_pattern), backfillFlag(false), numLHSRuns(0)
+{
+  subIteratorFlag = true; // suppress some output
+
+  // override default epistemicStats setting from NonD ctor
+  bool aleatory_mode = (samplingVarsMode == ALEATORY_UNCERTAIN ||
+			samplingVarsMode == ALEATORY_UNCERTAIN_UNIFORM);
+  epistemicStats = (numEpistemicUncVars && !aleatory_mode);
+
+  // enforce LHS as default sample type
+  if (!sampleType)
+    sampleType = SUBMETHOD_LHS;
+
+  // not used but included for completeness
+  if (numSamples) // samples is optional (default = 0)
+    maxEvalConcurrency *= numSamples;
+}
+
+
+/** This alternate constructor is used by ConcurrentStrategy for
+    generation of uniform, uncorrelated sample sets within the supplied
+    lower/upper bounds (ACTIVE_UNIFORM mode, varying pattern). */
+NonDSampling::
+NonDSampling(unsigned short sample_type, int samples, int seed,
+	     const String& rng, const RealVector& lower_bnds,
+	     const RealVector& upper_bnds):
+  NonD(RANDOM_SAMPLING, lower_bnds, upper_bnds), seedSpec(seed),
+  randomSeed(seed), samplesSpec(samples), samplesRef(samples),
+  numSamples(samples), rngName(rng), sampleType(sample_type), statsFlag(false),
+  allDataFlag(true), samplingVarsMode(ACTIVE_UNIFORM),
+  sampleRanksMode(IGNORE_RANKS), varyPattern(true), backfillFlag(false),
+  numLHSRuns(0)
+{
+  subIteratorFlag = true; // suppress some output
+
+  // enforce LHS as default sample type
+  if (!sampleType)
+    sampleType = SUBMETHOD_LHS;
+
+  // not used but included for completeness
+  if (numSamples) // samples is optional (default = 0)
+    maxEvalConcurrency *= numSamples;
+}
+
+
+/** Destructor (empty body; no explicit cleanup performed here). */
+NonDSampling::~NonDSampling()
+{ }
+
+
+/** This version of get_parameter_sets() extracts data from the
+    user-defined model in any of the four sampling modes. */
+void NonDSampling::get_parameter_sets(Model& model)
+{
+  initialize_lhs(true);
+
+  short model_view = model.current_variables().view().first;
+  switch (samplingVarsMode) {
+  case ACTIVE_UNIFORM: case ALL_UNIFORM: case UNCERTAIN_UNIFORM:
+  case ALEATORY_UNCERTAIN_UNIFORM: case EPISTEMIC_UNCERTAIN_UNIFORM:
+    // Use LHSDriver::generate_uniform_samples() between lower/upper bounds
+    if ( samplingVarsMode == ACTIVE_UNIFORM ||
+	 ( samplingVarsMode == ALL_UNIFORM &&
+	   ( model_view == RELAXED_ALL || model_view == MIXED_ALL ) ) ||
+	 ( samplingVarsMode == UNCERTAIN_UNIFORM &&
+	   ( model_view == RELAXED_UNCERTAIN ||
+	     model_view == MIXED_UNCERTAIN ) ) ||
+	 ( samplingVarsMode == ALEATORY_UNCERTAIN_UNIFORM &&
+	   ( model_view == RELAXED_ALEATORY_UNCERTAIN ||
+	     model_view == MIXED_ALEATORY_UNCERTAIN ) ) ||
+	 ( samplingVarsMode == EPISTEMIC_UNCERTAIN_UNIFORM &&
+	   ( model_view == RELAXED_EPISTEMIC_UNCERTAIN ||
+	     model_view == MIXED_EPISTEMIC_UNCERTAIN ) ) ) {
+      // sample uniformly from ACTIVE lower/upper bounds (regardless of model
+      // view), from UNCERTAIN lower/upper bounds (with model in DISTINCT view),
+      // or from ALL lower/upper bounds (with model in ALL view).
+      // loss of sampleRanks control is OK since NonDIncremLHS uses ACTIVE mode.
+      // TO DO: add support for uniform discrete
+      lhsDriver.generate_uniform_samples(model.continuous_lower_bounds(),
+					 model.continuous_upper_bounds(),
+					 numSamples, allSamples, backfillFlag);
+    }
+    else if (samplingVarsMode == ALL_UNIFORM) {
+      // sample uniformly from ALL lower/upper bnds with model in distinct view.
+      // loss of sampleRanks control is OK since NonDIncremLHS uses ACTIVE mode.
+      // TO DO: add support for uniform discrete
+      lhsDriver.generate_uniform_samples(model.all_continuous_lower_bounds(),
+					 model.all_continuous_upper_bounds(),
+					 numSamples, allSamples, backfillFlag);
+    }
+    else { // A, E, A+E UNCERTAIN
+      // sample uniformly from {A,E,A+E} UNCERTAIN lower/upper bounds
+      // with model using a non-corresponding view (corresponding views
+      // handled in first case above)
+      size_t start_acv, num_acv, dummy;
+      mode_counts(model, start_acv, num_acv, dummy, dummy, dummy, dummy,
+		  dummy, dummy);
+      if (!num_acv) {
+	Cerr << "Error: no active continuous variables for sampling in "
+	     << "uniform mode" << std::endl;
+	abort_handler(-1);
+      }
+      const RealVector& all_c_l_bnds = model.all_continuous_lower_bounds();
+      const RealVector& all_c_u_bnds = model.all_continuous_upper_bounds();
+      RealVector uncertain_c_l_bnds(Teuchos::View,
+	const_cast<Real*>(&all_c_l_bnds[start_acv]), num_acv);
+      RealVector uncertain_c_u_bnds(Teuchos::View,
+	const_cast<Real*>(&all_c_u_bnds[start_acv]), num_acv);
+      // loss of sampleRanks control is OK since NonDIncremLHS uses ACTIVE mode
+      // TO DO: add support for uniform discrete
+      lhsDriver.generate_uniform_samples(uncertain_c_l_bnds,
+					 uncertain_c_u_bnds,
+					 numSamples, allSamples, backfillFlag);
+    }
+    break;
+  case ALEATORY_UNCERTAIN:
+    {
+      lhsDriver.generate_samples(model.aleatory_distribution_parameters(),
+				 numSamples, allSamples, backfillFlag);
+      break;
+    }
+  case EPISTEMIC_UNCERTAIN:
+    {
+      lhsDriver.generate_samples(model.epistemic_distribution_parameters(),
+				 numSamples, allSamples, backfillFlag);
+      break;
+    }
+  case UNCERTAIN:
+    {
+      lhsDriver.generate_samples(model.aleatory_distribution_parameters(),
+				 model.epistemic_distribution_parameters(),
+				 numSamples, allSamples, backfillFlag);
+      break;
+    }
+  case ACTIVE: case ALL: {
+    // extract design and state bounds
+    RealVector  cdv_l_bnds,   cdv_u_bnds,   csv_l_bnds,   csv_u_bnds;
+    IntVector ddriv_l_bnds, ddriv_u_bnds, dsriv_l_bnds, dsriv_u_bnds;
+    const RealVector& all_c_l_bnds = model.all_continuous_lower_bounds();
+    const RealVector& all_c_u_bnds = model.all_continuous_upper_bounds();
+    if (numContDesVars) {
+      cdv_l_bnds
+	= RealVector(Teuchos::View, all_c_l_bnds.values(), numContDesVars);
+      cdv_u_bnds
+	= RealVector(Teuchos::View, all_c_u_bnds.values(), numContDesVars);
+    }
+    if (numContStateVars) {
+      size_t cv_start = model.acv() - numContStateVars;
+      csv_l_bnds = RealVector(Teuchos::View,
+	const_cast<Real*>(&all_c_l_bnds[cv_start]), numContStateVars);
+      csv_u_bnds = RealVector(Teuchos::View,
+	const_cast<Real*>(&all_c_u_bnds[cv_start]), numContStateVars);
+    }
+    const IntVector& all_di_l_bnds = model.all_discrete_int_lower_bounds();
+    const IntVector& all_di_u_bnds = model.all_discrete_int_upper_bounds();
+    size_t num_ddriv = (numDiscIntDesVars) ?
+      numDiscIntDesVars - model.discrete_design_set_int_values().size() : 0;
+    if (num_ddriv) {
+      ddriv_l_bnds = IntVector(Teuchos::View, all_di_l_bnds.values(),num_ddriv);
+      ddriv_u_bnds = IntVector(Teuchos::View, all_di_u_bnds.values(),num_ddriv);
+    }
+    size_t num_dsriv = (numDiscIntStateVars) ?
+      numDiscIntStateVars - model.discrete_state_set_int_values().size() : 0;
+    if (num_dsriv) {
+      size_t di_start = model.adiv() - numDiscIntStateVars;
+      dsriv_l_bnds = IntVector(Teuchos::View,
+	const_cast<int*>(&all_di_l_bnds[di_start]), num_dsriv);
+      dsriv_u_bnds = IntVector(Teuchos::View,
+	const_cast<int*>(&all_di_u_bnds[di_start]), num_dsriv);
+    }
+    IntSetArray empty_isa; StringSetArray empty_ssa; RealSetArray empty_rsa;
+    const IntSetArray&    di_design_sets = (numDiscIntDesVars) ?
+      model.discrete_design_set_int_values()    : empty_isa;
+    const StringSetArray& ds_design_sets = (numDiscStringDesVars) ?
+      model.discrete_design_set_string_values() : empty_ssa;
+    const RealSetArray&   dr_design_sets = (numDiscRealDesVars) ?
+      model.discrete_design_set_real_values()   : empty_rsa;
+    const IntSetArray&    di_state_sets  = (numDiscIntStateVars) ?
+      model.discrete_state_set_int_values()     : empty_isa;
+    const StringSetArray& ds_state_sets  = (numDiscStringStateVars) ?
+      model.discrete_state_set_string_values()  : empty_ssa;
+    const RealSetArray&   dr_state_sets  = (numDiscRealStateVars) ?
+      model.discrete_state_set_real_values()    : empty_rsa;
+
+    // Call LHS to generate the specified samples within the specified
+    // distributions.  Use model distribution parameters unless ACTIVE
+    // excludes {aleatory,epistemic,both} uncertain variables.
+    if ( samplingVarsMode == ACTIVE &&
+	 ( model_view == RELAXED_DESIGN || model_view == RELAXED_STATE ||
+	   model_view ==   MIXED_DESIGN || model_view ==   MIXED_STATE ) ) {
+      Pecos::AleatoryDistParams empty_adp; Pecos::EpistemicDistParams empty_edp;
+      if ( !backfillFlag )
+	lhsDriver.generate_samples(cdv_l_bnds, cdv_u_bnds, ddriv_l_bnds,
+	  ddriv_u_bnds, di_design_sets, ds_design_sets, dr_design_sets,
+	  csv_l_bnds, csv_u_bnds, dsriv_l_bnds, dsriv_u_bnds, di_state_sets,
+   	  ds_state_sets, dr_state_sets, empty_adp, empty_edp, numSamples,
+	  allSamples, sampleRanks);
+      else
+	lhsDriver.generate_unique_samples(cdv_l_bnds, cdv_u_bnds, ddriv_l_bnds,
+	  ddriv_u_bnds, di_design_sets, ds_design_sets, dr_design_sets,
+	  csv_l_bnds, csv_u_bnds, dsriv_l_bnds, dsriv_u_bnds, di_state_sets,
+   	  ds_state_sets, dr_state_sets, empty_adp, empty_edp, numSamples,
+	  allSamples, sampleRanks);
+    }
+    else if ( samplingVarsMode == ACTIVE &&
+	      ( model_view == RELAXED_ALEATORY_UNCERTAIN ||
+		model_view == MIXED_ALEATORY_UNCERTAIN ) ) {
+      Pecos::EpistemicDistParams empty_edp;
+      if ( !backfillFlag )
+	lhsDriver.generate_samples(cdv_l_bnds, cdv_u_bnds, ddriv_l_bnds,
+  	  ddriv_u_bnds, di_design_sets, ds_design_sets, dr_design_sets,
+	  csv_l_bnds, csv_u_bnds, dsriv_l_bnds, dsriv_u_bnds, di_state_sets,
+	  ds_state_sets, dr_state_sets, model.aleatory_distribution_parameters(),
+	  empty_edp, numSamples, allSamples, sampleRanks);
+      else
+	lhsDriver.generate_unique_samples(cdv_l_bnds, cdv_u_bnds, ddriv_l_bnds,
+  	  ddriv_u_bnds, di_design_sets, ds_design_sets, dr_design_sets,
+	  csv_l_bnds, csv_u_bnds, dsriv_l_bnds, dsriv_u_bnds, di_state_sets,
+	  ds_state_sets, dr_state_sets, model.aleatory_distribution_parameters(),
+	  empty_edp, numSamples, allSamples, sampleRanks);
+    }
+    else if ( samplingVarsMode == ACTIVE &&
+	      ( model_view == RELAXED_EPISTEMIC_UNCERTAIN ||
+		model_view == MIXED_EPISTEMIC_UNCERTAIN ) ) {
+      Pecos::AleatoryDistParams empty_adp;
+      if ( !backfillFlag )
+	lhsDriver.generate_samples(cdv_l_bnds, cdv_u_bnds, ddriv_l_bnds,
+  	  ddriv_u_bnds, di_design_sets, ds_design_sets, dr_design_sets,
+	  csv_l_bnds, csv_u_bnds, dsriv_l_bnds, dsriv_u_bnds, di_state_sets,
+	  ds_state_sets, dr_state_sets, empty_adp,
+	  model.epistemic_distribution_parameters(), numSamples, allSamples,
+	  sampleRanks);
+      else
+	lhsDriver.generate_unique_samples(cdv_l_bnds, cdv_u_bnds, ddriv_l_bnds,
+  	  ddriv_u_bnds, di_design_sets, ds_design_sets, dr_design_sets,
+	  csv_l_bnds, csv_u_bnds, dsriv_l_bnds, dsriv_u_bnds, di_state_sets,
+	  ds_state_sets, dr_state_sets, empty_adp,
+	  model.epistemic_distribution_parameters(), numSamples, allSamples,
+	  sampleRanks);
+    }
+    else
+      {
+	if ( !backfillFlag )
+	  lhsDriver.generate_samples(cdv_l_bnds, cdv_u_bnds, ddriv_l_bnds,
+	  ddriv_u_bnds, di_design_sets, ds_design_sets, dr_design_sets,
+	  csv_l_bnds, csv_u_bnds, dsriv_l_bnds, dsriv_u_bnds, di_state_sets,
+	  ds_state_sets, dr_state_sets, model.aleatory_distribution_parameters(),
+	  model.epistemic_distribution_parameters(), numSamples, allSamples,
+	  sampleRanks);
+	else
+	  lhsDriver.generate_unique_samples(cdv_l_bnds, cdv_u_bnds, ddriv_l_bnds,
+	  ddriv_u_bnds, di_design_sets, ds_design_sets, dr_design_sets,
+	  csv_l_bnds, csv_u_bnds, dsriv_l_bnds, dsriv_u_bnds, di_state_sets,
+	  ds_state_sets, dr_state_sets, model.aleatory_distribution_parameters(),
+	  model.epistemic_distribution_parameters(), numSamples, allSamples,
+	  sampleRanks);
+	  // Warning: sampleRanks will be empty.
+	  // See comment in lhs_driver.cpp generate_unique_samples()
+      }
+    break;
+  }
+  }
+}
+
+
+/** This version of get_parameter_sets() does not extract data from the
+    user-defined model, but instead relies on the incoming bounded region
+    definition.  It only supports a UNIFORM sampling mode, where the
+    distinction of ACTIVE_UNIFORM vs. ALL_UNIFORM is handled elsewhere. */
+void NonDSampling::
+get_parameter_sets(const RealVector& lower_bnds,
+		   const RealVector& upper_bnds)
+{
+  // seed/configure the LHS driver; true --> echo the sampling header message
+  initialize_lhs(true);
+  lhsDriver.generate_uniform_samples(lower_bnds, upper_bnds,
+				     numSamples, allSamples);
+}
+
+
+/** Push one sample vector back into the model's all_*() variables arrays.
+    sample_vars is packed in the order continuous, discrete int, discrete
+    string, discrete real; the starts/counts from mode_counts() map these
+    packed entries onto the corresponding all-variables indices.  Discrete
+    string values are encoded in the sample as set indices (cast to Real). */
+void NonDSampling::
+update_model_from_sample(Model& model, const Real* sample_vars)
+{
+  size_t i, cntr = 0, cv_start, num_cv, div_start, num_div, dsv_start, num_dsv,
+    drv_start, num_drv;
+  mode_counts(model, cv_start, num_cv, div_start, num_div, dsv_start, num_dsv,
+	      drv_start, num_drv);
+
+  // sampled continuous vars (by value)
+  size_t end = cv_start + num_cv;
+  for (i=cv_start; i<end; ++i, ++cntr)
+    model.all_continuous_variable(sample_vars[cntr], i);
+  // sampled discrete int vars (by value cast from Real)
+  end = div_start + num_div;
+  for (i=div_start; i<end; ++i, ++cntr)
+    model.all_discrete_int_variable((int)sample_vars[cntr], i);
+  // sampled discrete string vars (by index cast from Real); look up the
+  // string set arrays using the ALL view matching the active relaxation
+  short active_view = model.current_variables().view().first;
+  bool relax = (active_view == RELAXED_ALL ||
+    ( active_view >= RELAXED_DESIGN && active_view <= RELAXED_STATE ) );
+  short all_view = (relax) ? RELAXED_ALL : MIXED_ALL;
+  const StringSetArray& all_dss_values
+    = model.discrete_set_string_values(all_view);
+  end = dsv_start + num_dsv;
+  for (i=dsv_start; i<end; ++i, ++cntr)
+    model.all_discrete_string_variable(set_index_to_value(
+      (size_t)sample_vars[cntr], all_dss_values[i]), i);
+  // sampled discrete real vars (by value)
+  end = drv_start + num_drv;
+  for (i=drv_start; i<end; ++i, ++cntr)
+    model.all_discrete_real_variable(sample_vars[cntr], i);
+}
+
+
+// BMA TODO: consolidate with other use cases
+/** Map one sample vector (packed as continuous, discrete int, discrete
+    string, discrete real) into the all_*() arrays of vars, using the
+    starts/counts from mode_counts() on iteratedModel.  Discrete string
+    entries are encoded in the sample as set indices (cast to Real). */
+void NonDSampling::
+sample_to_variables(const Real* sample_vars, Variables& vars)
+{
+  size_t i, cntr = 0, cv_start, num_cv, div_start, num_div, dsv_start, num_dsv,
+    drv_start, num_drv;
+  mode_counts(iteratedModel, cv_start, num_cv, div_start, num_div, dsv_start, num_dsv,
+	      drv_start, num_drv);
+
+  // BMA TODO: make sure inactive get updated too as needed?
+
+  // sampled continuous vars (by value)
+  size_t end = cv_start + num_cv;
+  for (i=cv_start; i<end; ++i, ++cntr)
+    vars.all_continuous_variable(sample_vars[cntr], i);
+  // sampled discrete int vars (by value cast from Real)
+  end = div_start + num_div;
+  for (i=div_start; i<end; ++i, ++cntr)
+    vars.all_discrete_int_variable((int)sample_vars[cntr], i);
+  // sampled discrete string vars (by index cast from Real); use the ALL
+  // view that matches the active view's relaxation for the set lookup
+  short active_view = vars.view().first;
+  bool relax = (active_view == RELAXED_ALL ||
+    ( active_view >= RELAXED_DESIGN && active_view <= RELAXED_STATE ) );
+  short all_view = (relax) ? RELAXED_ALL : MIXED_ALL;
+  const StringSetArray& all_dss_values
+    = iteratedModel.discrete_set_string_values(all_view);
+  end = dsv_start + num_dsv;
+  for (i=dsv_start; i<end; ++i, ++cntr)
+    vars.all_discrete_string_variable(set_index_to_value(
+      (size_t)sample_vars[cntr], all_dss_values[i]), i);
+  // sampled discrete real vars (by value)
+  end = drv_start + num_drv;
+  for (i=drv_start; i<end; ++i, ++cntr)
+    vars.all_discrete_real_variable(sample_vars[cntr], i);
+}
+
+
+// BMA TODO: consolidate with other use cases
+/** Map the active variables from vars to sample_vars (column in allSamples).
+    The packing order is continuous, discrete int, discrete string, discrete
+    real; string values are stored as their set index cast to Real. */
+void NonDSampling::
+variables_to_sample(const Variables& vars, Real* sample_vars)
+{
+  size_t cntr = 0;
+
+  const RealVector& c_vars = vars.continuous_variables();
+  for (size_t j=0; j<numContinuousVars; ++j, ++cntr)
+    sample_vars[cntr] = c_vars[j]; // jth row of samples_matrix
+
+  const IntVector& di_vars = vars.discrete_int_variables();
+  for (size_t j=0; j<numDiscreteIntVars; ++j, ++cntr)
+    sample_vars[cntr] = (Real) di_vars[j]; // jth row of samples_matrix
+
+  // to help with mapping string variables
+  // sampled discrete string vars (by index cast from Real)
+  short active_view = vars.view().first;
+  bool relax = (active_view == RELAXED_ALL ||
+    ( active_view >= RELAXED_DESIGN && active_view <= RELAXED_STATE ) );
+  short all_view = (relax) ? RELAXED_ALL : MIXED_ALL;
+  const StringSetArray& all_dss_values
+    = iteratedModel.discrete_set_string_values(all_view);
+
+  // NOTE(review): all_dss_values is indexed by j (an active-variable index)
+  // below; is care needed to manage active vs. all string variables?
+
+  StringMultiArrayConstView ds_vars = vars.discrete_string_variables();
+  for (size_t j=0; j<numDiscreteStringVars; ++j, ++cntr)
+    sample_vars[cntr] =
+      (Real) set_value_to_index(ds_vars[j], all_dss_values[j]); // jth row of samples_matrix
+
+  const RealVector& dr_vars = vars.discrete_real_variables();
+  for (size_t j=0; j<numDiscreteRealVars; ++j, ++cntr)
+    sample_vars[cntr] = (Real) dr_vars[j]; // jth row of samples_matrix
+}
+
+
+/** This function and its helpers to follow are needed since NonDSampling
+    supports a richer set of sampling modes than just the active variable
+    subset.  mode_counts() manages the samplingVarsMode setting, while its
+    helper functions (view_{design,aleatory_uncertain,epistemic_uncertain,
+    uncertain,state}_counts) manage the active variables view.  Similar
+    to the computation of starts and counts in creating active variable
+    views, the results of this function are starts and counts for use
+    within model.all_*() set/get functions. */
+void NonDSampling::
+mode_counts(const Model& model, size_t& cv_start,  size_t& num_cv,
+	    size_t& div_start,  size_t& num_div,   size_t& dsv_start,
+	    size_t& num_dsv,    size_t& drv_start, size_t& num_drv) const
+{
+  // default all outputs to zero; each case below overwrites what it needs
+  cv_start = div_start = dsv_start = drv_start = 0;
+  num_cv   = num_div   = num_dsv   = num_drv   = 0;
+  switch (samplingVarsMode) {
+  case ALEATORY_UNCERTAIN:
+    // design vars define starting indices
+    view_design_counts(model, cv_start, div_start, dsv_start, drv_start);
+    // A uncertain vars define counts
+    view_aleatory_uncertain_counts(model, num_cv, num_div, num_dsv, num_drv);
+    break;
+  case ALEATORY_UNCERTAIN_UNIFORM: {
+    // UNIFORM views do not currently support non-relaxed discrete
+    size_t dummy;
+    // continuous design vars define starting indices
+    view_design_counts(model, cv_start, dummy, dummy, dummy);
+    // continuous A uncertain vars define counts
+    view_aleatory_uncertain_counts(model, num_cv, dummy, dummy, dummy);   break;
+  }
+  case EPISTEMIC_UNCERTAIN: {
+    // design + A uncertain vars define starting indices
+    size_t num_cdv,  num_ddiv,  num_ddsv,  num_ddrv,
+           num_cauv, num_dauiv, num_dausv, num_daurv;
+    view_design_counts(model, num_cdv, num_ddiv, num_ddsv, num_ddrv);
+    view_aleatory_uncertain_counts(model, num_cauv, num_dauiv, num_dausv,
+				   num_daurv);
+    cv_start  = num_cdv  + num_cauv;  div_start = num_ddiv + num_dauiv;
+    dsv_start = num_ddsv + num_dausv; drv_start = num_ddrv + num_daurv;
+    // E uncertain vars define counts
+    view_epistemic_uncertain_counts(model, num_cv, num_div, num_dsv, num_drv);
+    break;
+  }
+  case EPISTEMIC_UNCERTAIN_UNIFORM: {
+    // UNIFORM views do not currently support non-relaxed discrete
+    // continuous design + A uncertain vars define starting indices
+    size_t num_cdv, num_cauv, dummy;
+    view_design_counts(model, num_cdv, dummy, dummy, dummy);
+    view_aleatory_uncertain_counts(model, num_cauv, dummy, dummy, dummy);
+    cv_start = num_cdv + num_cauv;
+    // continuous E uncertain vars define counts
+    view_epistemic_uncertain_counts(model, num_cv, dummy, dummy, dummy);  break;
+  }
+  case UNCERTAIN:
+    // design vars define starting indices
+    view_design_counts(model, cv_start, div_start, dsv_start, drv_start);
+    // A+E uncertain vars define counts
+    view_uncertain_counts(model, num_cv, num_div, num_dsv, num_drv);      break;
+  case UNCERTAIN_UNIFORM: {
+    // UNIFORM views do not currently support non-relaxed discrete
+    size_t dummy;
+    // continuous design vars define starting indices
+    view_design_counts(model, cv_start, dummy, dummy, dummy);
+    // continuous A+E uncertain vars define counts
+    view_uncertain_counts(model, num_cv, dummy, dummy, dummy);            break;
+  }
+  case ACTIVE: {
+    // the active variables view supplies both starts and counts directly
+    const Variables& vars = model.current_variables();
+    cv_start  = vars.cv_start();  num_cv  = vars.cv();
+    div_start = vars.div_start(); num_div = vars.div();
+    dsv_start = vars.dsv_start(); num_dsv = vars.dsv();
+    drv_start = vars.drv_start(); num_drv = vars.drv();                   break;
+  }
+  case ACTIVE_UNIFORM: {
+    // UNIFORM views do not currently support non-relaxed discrete
+    const Variables& vars = model.current_variables();
+    cv_start = vars.cv_start(); num_cv = vars.cv();                       break;
+  }
+  case ALL:
+    // all variables: zero offsets, totals from the model
+    num_cv  = model.acv();  num_div = model.adiv();
+    num_dsv = model.adsv(); num_drv = model.adrv();                       break;
+  case ALL_UNIFORM:
+    // UNIFORM views do not currently support non-relaxed discrete
+    num_cv = model.acv();                                                 break;
+  }
+}
+
+
+/** This function computes total design variable counts, not active counts,
+    for use in defining offsets and counts within all variables arrays. */
+void NonDSampling::
+view_design_counts(const Model& model, size_t& num_cdv, size_t& num_ddiv,
+		   size_t& num_ddsv, size_t& num_ddrv) const
+{
+  const Variables& vars = model.current_variables();
+  short active_view = vars.view().first;
+  switch (active_view) {
+  case RELAXED_ALL: case MIXED_ALL: case RELAXED_DESIGN: case MIXED_DESIGN:
+    // design vars are included in active counts from NonD and relaxation
+    // counts have already been applied
+    num_cdv  = numContDesVars;       num_ddiv = numDiscIntDesVars;
+    num_ddsv = numDiscStringDesVars; num_ddrv = numDiscRealDesVars; break;
+  case RELAXED_EPISTEMIC_UNCERTAIN: case RELAXED_STATE:
+  case MIXED_EPISTEMIC_UNCERTAIN: case MIXED_STATE:
+    // design vars are not included in active counts from NonD
+    vars.shared_data().design_counts(num_cdv, num_ddiv, num_ddsv, num_ddrv);
+    break;
+  case RELAXED_UNCERTAIN: case RELAXED_ALEATORY_UNCERTAIN:
+  case   MIXED_UNCERTAIN: case   MIXED_ALEATORY_UNCERTAIN:
+    // uncertain active views: the variables preceding the active set are
+    // taken to be the design vars, so active starts serve as design counts
+    num_cdv  = vars.cv_start();  num_ddiv = vars.div_start();
+    num_ddsv = vars.dsv_start(); num_ddrv = vars.drv_start(); break;
+  }
+}
+
+
+/** This function computes total aleatory uncertain variable counts,
+    not active counts, for use in defining offsets and counts within
+    all variables arrays. */
+void NonDSampling::
+view_aleatory_uncertain_counts(const Model& model, size_t& num_cauv,
+			       size_t& num_dauiv, size_t& num_dausv,
+			       size_t& num_daurv) const
+{
+  const Variables& vars = model.current_variables();
+  short active_view = vars.view().first;
+  switch (active_view) {
+  case RELAXED_ALL: case RELAXED_UNCERTAIN: case RELAXED_ALEATORY_UNCERTAIN:
+  case   MIXED_ALL: case   MIXED_UNCERTAIN: case   MIXED_ALEATORY_UNCERTAIN:
+    // aleatory vars are included in active counts from NonD and relaxation
+    // counts have already been applied
+    num_cauv  = numContAleatUncVars;       num_dauiv = numDiscIntAleatUncVars;
+    num_dausv = numDiscStringAleatUncVars; num_daurv = numDiscRealAleatUncVars;
+    break;
+  case RELAXED_DESIGN: case RELAXED_STATE: case RELAXED_EPISTEMIC_UNCERTAIN:
+  case MIXED_DESIGN:   case MIXED_STATE:   case MIXED_EPISTEMIC_UNCERTAIN:
+    // aleatory vars are not active: query totals from the shared data
+    vars.shared_data().aleatory_uncertain_counts(num_cauv,  num_dauiv,
+						 num_dausv, num_daurv);
+    break;
+  }
+}
+
+
+/** This function computes total epistemic uncertain variable counts,
+    not active counts, for use in defining offsets and counts within
+    all variables arrays. */
+void NonDSampling::
+view_epistemic_uncertain_counts(const Model& model, size_t& num_ceuv,
+				size_t& num_deuiv, size_t& num_deusv,
+				size_t& num_deurv) const
+{
+  const Variables& vars = model.current_variables();
+  short active_view = vars.view().first;
+  switch (active_view) {
+  case RELAXED_ALL: case RELAXED_UNCERTAIN: case RELAXED_EPISTEMIC_UNCERTAIN:
+  case   MIXED_ALL: case   MIXED_UNCERTAIN: case   MIXED_EPISTEMIC_UNCERTAIN:
+    // epistemic vars are included in active counts from NonD
+    num_ceuv  = numContEpistUncVars;       num_deuiv = numDiscIntEpistUncVars;
+    num_deusv = numDiscStringEpistUncVars; num_deurv = numDiscRealEpistUncVars;
+    break;
+  case RELAXED_DESIGN: case RELAXED_ALEATORY_UNCERTAIN: case RELAXED_STATE:
+  case MIXED_DESIGN:   case MIXED_ALEATORY_UNCERTAIN:   case MIXED_STATE:
+    // epistemic vars are not active: query totals from the shared data
+    vars.shared_data().epistemic_uncertain_counts(num_ceuv,  num_deuiv,
+						  num_deusv, num_deurv);
+    break;
+  }
+}
+
+
+/** This function computes total uncertain variable counts, not active counts,
+    for use in defining offsets and counts within all variables arrays. */
+void NonDSampling::
+view_uncertain_counts(const Model& model, size_t& num_cuv, size_t& num_duiv,
+		      size_t& num_dusv, size_t& num_durv) const
+{
+  const Variables& vars = model.current_variables();
+  short active_view = vars.view().first;
+  switch (active_view) {
+  case RELAXED_ALL: case MIXED_ALL: // UNCERTAIN = subset of ACTIVE
+    // total uncertain = aleatory + epistemic totals
+    num_cuv  = numContAleatUncVars       + numContEpistUncVars;
+    num_duiv = numDiscIntAleatUncVars    + numDiscIntEpistUncVars;
+    num_dusv = numDiscStringAleatUncVars + numDiscStringEpistUncVars;
+    num_durv = numDiscRealAleatUncVars   + numDiscRealEpistUncVars;      break;
+  case RELAXED_DESIGN:             case RELAXED_STATE:
+  case RELAXED_ALEATORY_UNCERTAIN: case RELAXED_EPISTEMIC_UNCERTAIN:
+  case MIXED_DESIGN:               case MIXED_STATE:
+  case MIXED_ALEATORY_UNCERTAIN:   case MIXED_EPISTEMIC_UNCERTAIN:
+    // partial or no overlap with active set: query shared data totals
+    vars.shared_data().uncertain_counts(num_cuv, num_duiv, num_dusv, num_durv);
+    break;
+  case RELAXED_UNCERTAIN: case MIXED_UNCERTAIN: // UNCERTAIN = same as ACTIVE
+    num_cuv  = vars.cv();  num_duiv = vars.div();
+    num_dusv = vars.dsv(); num_durv = vars.drv(); break;
+  }
+}
+
+
+/** This function computes total state variable counts, not active counts,
+    for use in defining offsets and counts within all variables arrays
+    (companion to the view_*_counts() helpers above). */
+void NonDSampling::
+view_state_counts(const Model& model, size_t& num_csv, size_t& num_dsiv,
+		  size_t& num_dssv, size_t& num_dsrv) const
+{
+  const Variables& vars = model.current_variables();
+  short active_view = vars.view().first;
+  switch (active_view) {
+  case RELAXED_ALL: case MIXED_ALL: case RELAXED_STATE: case MIXED_STATE:
+    // state vars are included in active counts from NonD and relaxation
+    // counts have already been applied
+    num_csv  = numContStateVars;       num_dsiv = numDiscIntStateVars;
+    num_dssv = numDiscStringStateVars; num_dsrv = numDiscRealStateVars; break;
+  case RELAXED_ALEATORY_UNCERTAIN: case RELAXED_DESIGN:
+  case   MIXED_ALEATORY_UNCERTAIN: case   MIXED_DESIGN:
+    // state vars are not included in active counts from NonD
+    vars.shared_data().state_counts(num_csv, num_dsiv, num_dssv, num_dsrv);
+    break;
+  case RELAXED_UNCERTAIN: case RELAXED_EPISTEMIC_UNCERTAIN:
+  case   MIXED_UNCERTAIN: case   MIXED_EPISTEMIC_UNCERTAIN:
+    // state vars are taken to be everything after the active set:
+    // total - (vars preceding active) - (active count)
+    num_csv  = vars.acv()  - vars.cv_start()  - vars.cv();
+    num_dsiv = vars.adiv() - vars.div_start() - vars.div();
+    num_dssv = vars.adsv() - vars.dsv_start() - vars.dsv();
+    num_dsrv = vars.adrv() - vars.drv_start() - vars.drv(); break;
+  }
+}
+
+
+/** Prepare the LHS driver for a run: manage the seed (user-specified,
+    system-generated, or advanced along a deterministic sequence when
+    varyPattern is set), optionally write the sampling header message,
+    and initialize the driver with the sample type and ranks mode. */
+void NonDSampling::initialize_lhs(bool write_message)
+{
+  // keep track of number of LHS executions for this object
+  ++numLHSRuns;
+
+  // Set seed value for input to LHS's random number generator.  Emulate DDACE
+  // behavior in which a user-specified seed gives you repeatable behavior but
+  // no specification gives you random behavior.  A system clock is used to
+  // randomize in the no user specification case.  For cases where
+  // get_parameter_sets() may be called multiple times for the same sampling
+  // iterator (e.g., SBO), support a deterministic sequence of seed values.
+  // This renders the study repeatable but the sampling pattern varies from
+  // one run to the next.
+  if (numLHSRuns == 1) { // set initial seed
+    lhsDriver.rng(rngName);
+    if (!seedSpec) // no user specification --> nonrepeatable behavior
+      randomSeed = generate_system_seed();
+    lhsDriver.seed(randomSeed);
+  }
+  else if (varyPattern) // define sequence of seed values for numLHSRuns > 1
+    lhsDriver.advance_seed_sequence();
+  else // fixed_seed
+    lhsDriver.seed(randomSeed); // reset original/machine-generated seed
+
+  // Needed a way to turn this off when LHS sampling is being used in
+  // NonDAdaptImpSampling because it gets written a _LOT_
+  String sample_string = submethod_enum_to_string(sampleType);
+  if (write_message) {
+    Cout << "\nNonD " << sample_string << " Samples = " << numSamples;
+    if (numLHSRuns == 1 || !varyPattern) {
+      if (seedSpec) Cout << " Seed (user-specified) = ";
+      else          Cout << " Seed (system-generated) = ";
+      Cout << randomSeed << '\n';
+    }
+    else if (rngName == "rnum2") {
+      if (seedSpec) Cout << " Seed (sequence from user-specified) = ";
+      else          Cout << " Seed (sequence from system-generated) = ";
+      Cout << lhsDriver.seed() << '\n';
+    }
+    else // default Boost Mersenne twister
+      Cout << " Seed not reset from previous LHS execution\n";
+  }
+
+  lhsDriver.initialize(sample_string, sampleRanksMode, !subIteratorFlag);
+}
+
+
+/** Compute output statistics for a set of samples: archive the active
+    variable/response labels to the results DB, then either compute min/max
+    response intervals (epistemic/mixed) or moments plus any requested
+    CDF/CCDF level mappings (aleatory), and finally refresh finalStatistics.
+    vars_samples is currently unused (correlation computation disabled). */
+void NonDSampling::
+compute_statistics(const RealMatrix&     vars_samples,
+		   const IntResponseMap& resp_samples)
+{
+  StringMultiArrayConstView
+    acv_labels  = iteratedModel.all_continuous_variable_labels(),
+    adiv_labels = iteratedModel.all_discrete_int_variable_labels(),
+    adsv_labels = iteratedModel.all_discrete_string_variable_labels(),
+    adrv_labels = iteratedModel.all_discrete_real_variable_labels();
+  size_t cv_start, num_cv, div_start, num_div, dsv_start, num_dsv,
+    drv_start, num_drv;
+  mode_counts(iteratedModel, cv_start, num_cv, div_start, num_div,
+	      dsv_start, num_dsv, drv_start, num_drv);
+  // slice the sampled subset of labels out of the "all" label arrays
+  StringMultiArrayConstView
+    cv_labels  =
+      acv_labels[boost::indices[idx_range(cv_start, cv_start+num_cv)]],
+    div_labels =
+      adiv_labels[boost::indices[idx_range(div_start, div_start+num_div)]],
+    dsv_labels =
+      adsv_labels[boost::indices[idx_range(dsv_start, dsv_start+num_dsv)]],
+    drv_labels =
+      adrv_labels[boost::indices[idx_range(drv_start, drv_start+num_drv)]];
+
+  // archive the active variables with the results
+  if (resultsDB.active()) {
+    if (num_cv)
+      resultsDB.insert(run_identifier(), resultsNames.cv_labels, cv_labels);
+    if (num_div)
+      resultsDB.insert(run_identifier(), resultsNames.div_labels, div_labels);
+    if (num_dsv)
+      resultsDB.insert(run_identifier(), resultsNames.dsv_labels, dsv_labels);
+    if (num_drv)
+      resultsDB.insert(run_identifier(), resultsNames.drv_labels, drv_labels);
+    resultsDB.insert(run_identifier(), resultsNames.fn_labels,
+		     iteratedModel.response_labels());
+  }
+
+  if (epistemicStats) // Epistemic/mixed
+    compute_intervals(resp_samples); // compute min/max response intervals
+  else { // Aleatory
+    // compute means and std deviations with confidence intervals
+    compute_moments(resp_samples);
+    // compute CDF/CCDF mappings of z to p/beta and p/beta to z
+    if (totalLevelRequests)
+      compute_distribution_mappings(resp_samples);
+  }
+
+  // Don't compute for now: too expensive
+  // if (!subIteratorFlag) {
+  //   nonDSampCorr.compute_correlations(vars_samples, resp_samples);
+  //   // archive the correlations to the results DB
+  //   nonDSampCorr.archive_correlations(run_identifier(), resultsDB, cv_labels,
+		// 		      div_labels, dsv_labels, drv_labels,
+		// 		      iteratedModel.response_labels());
+  // }
+  if (!finalStatistics.is_null())
+    update_final_statistics();
+}
+
+
+/** For each response function, scan the sample responses for the min and
+    max finite values, storing them in extremeValues (row 0 = min, row 1 =
+    max) and warning about any non-finite (failed) evaluations; results are
+    archived to the results DB when active. */
+void NonDSampling::compute_intervals(const IntResponseMap& samples)
+{
+  // For the samples array, calculate min/max response intervals
+
+  using boost::math::isfinite;
+  size_t i, j, num_obs = samples.size(), num_samp;
+  const StringArray& resp_labels = iteratedModel.response_labels();
+
+  if (extremeValues.empty()) extremeValues.shapeUninitialized(2, numFunctions);
+  IntRespMCIter it;
+  for (i=0; i<numFunctions; ++i) {
+    num_samp = 0;
+    Real min = DBL_MAX, max = -DBL_MAX;
+    for (it=samples.begin(); it!=samples.end(); ++it) {
+      const Real& sample = it->second.function_value(i);
+      if (isfinite(sample)) { // neither NaN nor +/-Inf
+	if (sample < min) min = sample;
+	if (sample > max) max = sample;
+	++num_samp;
+      }
+    }
+    extremeValues(0, i) = min;
+    extremeValues(1, i) = max;
+    if (num_samp != num_obs)
+      Cerr << "Warning: sampling statistics for " << resp_labels[i] << " omit "
+	   << num_obs-num_samp << " failed evaluations out of " << num_obs
+	   << " samples.\n";
+  }
+
+  if (resultsDB.active()) {
+    MetaDataType md;
+    md["Row Labels"] = make_metadatavalue("Min", "Max");
+    md["Column Labels"] = make_metadatavalue(resp_labels);
+    resultsDB.insert(run_identifier(), resultsNames.extreme_values,
+		     extremeValues, md);
+  }
+}
+
+
+/** For each response function, compute sample mean, standard deviation,
+    skewness, and (excess) kurtosis over the finite sample values, storing
+    them in momentStats (rows 0-3).  Also compute 95% two-sided confidence
+    intervals on the mean (Student's t) and std deviation (chi-squared),
+    stored in momentCIs (rows 0-3); both arrays are archived to the results
+    DB when active.  Non-finite (failed) evaluations are skipped with a
+    warning; zero usable samples for a response is a fatal error. */
+void NonDSampling::compute_moments(const IntResponseMap& samples)
+{
+  // For the samples array, calculate means and standard deviations
+  // with confidence intervals
+
+  using boost::math::isfinite;
+  size_t i, j, num_obs = samples.size(), num_samp;
+  Real sum, var, skew, kurt;
+  const StringArray& resp_labels = iteratedModel.response_labels();
+
+  if (momentStats.empty()) momentStats.shapeUninitialized(4, numFunctions);
+  if (momentCIs.empty()) momentCIs.shapeUninitialized(4, numFunctions);
+
+  IntRespMCIter it;
+  for (i=0; i<numFunctions; ++i) {
+
+    num_samp  = 0;
+    sum = var = skew = kurt = 0.;
+    // first pass: accumulate the sum of finite values for the mean
+    for (it=samples.begin(); it!=samples.end(); ++it) {
+      const Real& sample = it->second.function_value(i);
+      if (isfinite(sample)) { // neither NaN nor +/-Inf
+	sum += sample;
+	++num_samp;
+      }
+    }
+
+    if (num_samp != num_obs)
+      Cerr << "Warning: sampling statistics for " << resp_labels[i] << " omit "
+	   << num_obs-num_samp << " failed evaluations out of " << num_obs
+	   << " samples.\n";
+    if (!num_samp) {
+      Cerr << "Error: Number of samples for " << resp_labels[i]
+	   << " must be nonzero for moment calculation in NonDSampling::"
+	   << "compute_statistics()." << std::endl;
+      abort_handler(-1);
+    }
+
+    Real* moments_i = momentStats[i];
+    Real& mean = moments_i[0];
+    mean = sum/((Real)num_samp);
+
+    // second pass: accumulate centered 2nd/3rd/4th powers for variance,
+    // skewness, and kurtosis
+    Real centered_fn, pow_fn;
+    for (it=samples.begin(); it!=samples.end(); ++it) {
+      const Real& sample = it->second.function_value(i);
+      if (isfinite(sample)) { // neither NaN nor +/-Inf
+	pow_fn  = centered_fn = sample - mean;
+	pow_fn *= centered_fn; var  += pow_fn;
+	pow_fn *= centered_fn; skew += pow_fn;
+	pow_fn *= centered_fn; kurt += pow_fn;
+      }
+    }
+
+    // sample std deviation (unbiased, n-1 denominator; 0 for a single sample)
+    Real& std_dev = moments_i[1];
+    std_dev = (num_samp > 1) ? std::sqrt(var/(Real)(num_samp-1)) : 0.;
+
+    // skewness
+    moments_i[2] = (num_samp > 2 && var > 0.) ?
+      // sample skewness
+      skew/(Real)num_samp/std::pow(var/(Real)num_samp,1.5) *
+      // population skewness
+      std::sqrt((Real)(num_samp*(num_samp-1)))/(Real)(num_samp-2) :
+      // for no variation, central moment is zero
+      0.;
+
+    // kurtosis
+    moments_i[3] = (num_samp > 3 && var > 0.) ?
+      // sample kurtosis
+      (Real)((num_samp+1)*num_samp*(num_samp-1))*kurt/
+      (Real)((num_samp-2)*(num_samp-3)*var*var) -
+      // population kurtosis
+      3.*std::pow((Real)(num_samp-1),2)/(Real)((num_samp-2)*(num_samp-3)) :
+      // for no variation, central moment is zero minus excess kurtosis
+      -3.;
+
+    if (num_samp > 1) {
+      // 95% confidence intervals (2-sided interval, not 1-sided limit)
+      Real dof = num_samp - 1;
+//#ifdef HAVE_BOOST
+      // mean: the better formula does not assume known variance but requires
+      // a function for the Student's t-distr. with (num_samp-1) degrees of
+      // freedom (Haldar & Mahadevan, p. 127).
+      Pecos::students_t_dist t_dist(dof);
+      Real mean_ci_delta =
+	std_dev*bmth::quantile(t_dist,0.975)/std::sqrt((Real)num_samp);
+      momentCIs(0,i) = mean - mean_ci_delta;
+      momentCIs(1,i) = mean + mean_ci_delta;
+      // std dev: chi-square distribution with (num_samp-1) degrees of freedom
+      // (Haldar & Mahadevan, p. 132).
+      Pecos::chi_squared_dist chisq(dof);
+      momentCIs(2,i) = std_dev*std::sqrt(dof/bmth::quantile(chisq, 0.975));
+      momentCIs(3,i) = std_dev*std::sqrt(dof/bmth::quantile(chisq, 0.025));
+/*
+#elif HAVE_GSL
+      // mean: the better formula does not assume known variance but requires
+      // a function for the Student's t-distr. with (num_samp-1) degrees of
+      // freedom (Haldar & Mahadevan, p. 127).
+      mean95CIDeltas[i]
+	= std_dev*gsl_cdf_tdist_Pinv(0.975,dof)/std::sqrt((Real)num_samp);
+      // std dev: chi-square distribution with (num_samp-1) degrees of freedom
+      // (Haldar & Mahadevan, p. 132).
+      stdDev95CILowerBnds[i]
+        = std_dev*std::sqrt(dof/gsl_cdf_chisq_Pinv(0.975, dof));
+      stdDev95CIUpperBnds[i]
+        = std_dev*std::sqrt(dof/gsl_cdf_chisq_Pinv(0.025, dof));
+#else
+      // mean: k_(alpha/2) = Phi^(-1)(0.975) = 1.96 (Haldar & Mahadevan,
+      // p. 123).  This simple formula assumes a known variance, which
+      // requires a sample of sufficient size (i.e., greater than 10).
+      mean95CIDeltas[i] = 1.96*std_dev/std::sqrt((Real)num_samp);
+#endif // HAVE_BOOST
+*/
+    }
+    else
+      momentCIs(0,i) = momentCIs(1,i) = momentCIs(2,i) = momentCIs(3,i) = 0.0;
+  }
+
+  if (resultsDB.active()) {
+    // archive the moments to results DB
+    MetaDataType md_moments;
+    md_moments["Row Labels"] =
+      make_metadatavalue("Mean", "Standard Deviation", "Skewness", "Kurtosis");
+    md_moments["Column Labels"] = make_metadatavalue(resp_labels);
+    resultsDB.insert(run_identifier(), resultsNames.moments_std,
+		     momentStats, md_moments);
+    // archive the confidence intervals to results DB
+    MetaDataType md;
+    md["Row Labels"] =
+      make_metadatavalue("LowerCI_Mean", "UpperCI_Mean", "LowerCI_StdDev",
+			 "UpperCI_StdDev");
+    md["Column Labels"] = make_metadatavalue(resp_labels);
+    resultsDB.insert(run_identifier(), resultsNames.moment_cis, momentCIs, md);
+  }
+}
+
+
+// Compute CDF/CCDF mappings for each response function:
+//   z -> p/beta/beta*  (requested response levels mapped, via sample binning
+//                       or moment projection, to probability / reliability /
+//                       generalized reliability levels), and
+//   p/beta/beta* -> z  (requested probability / reliability levels mapped to
+//                       response levels via sorted-sample quantiles or
+//                       mean +/- beta*sigma),
+// then optionally post-process the binned samples into a PDF histogram per
+// response function.  Non-finite samples (NaN, +/-Inf) are excluded.
+// NOTE(review): reliability (beta) mappings require momentStats to be
+// populated beforehand via compute_moments(); enforced with abort below.
+void NonDSampling::compute_distribution_mappings(const IntResponseMap& samples)
+{
+  // Size the output arrays here instead of in the ctor in order to support
+  // alternate sampling ctors.
+  initialize_distribution_mappings();
+  archive_allocate_mappings();
+  if (pdfOutput) {
+    computedPDFAbscissas.resize(numFunctions);
+    computedPDFOrdinates.resize(numFunctions);
+    archive_allocate_pdf();
+  }
+
+  // For the samples array, calculate the following statistics:
+  // > CDF/CCDF mappings of response levels to probability/reliability levels
+  // > CDF/CCDF mappings of probability/reliability levels to response levels
+  using boost::math::isfinite;
+  size_t i, j, k, num_obs = samples.size(), num_samp, bin_accumulator;
+  const StringArray& resp_labels = iteratedModel.response_labels();
+  RealArray sorted_samples; // STL-based array for sorting
+  SizetArray bins; Real min, max;
+
+  // check if moments are required, and if so, compute them now
+  if (momentStats.empty()) {
+    bool need_moments = false;
+    for (i=0; i<numFunctions; ++i)
+      if ( !requestedRelLevels[i].empty() ||
+	   ( !requestedRespLevels[i].empty() &&
+	     respLevelTarget == RELIABILITIES ) )
+	{ need_moments = true; break; }
+    if (need_moments) {
+      Cerr << "Error: required moments not available in compute_distribution_"
+	   << "mappings().  Call compute_moments() first." << std::endl;
+      abort_handler(-1);
+      // Issue with the following approach is that subsequent invocations of
+      // compute_distribution_mappings() without compute_moments() would not
+      // be detected and old moments would be used.  Performing more rigorous
+      // bookkeeping of moment updates is overkill for current use cases.
+      //Cerr << "Warning: moments not available in compute_distribution_"
+      //     << "mappings(); computing them now." << std::endl;
+      //compute_moments(samples);
+    }
+  }
+
+  IntRespMCIter it;
+  for (i=0; i<numFunctions; ++i) {
+
+    // CDF/CCDF mappings: z -> p/beta/beta* and p/beta/beta* -> z
+    size_t rl_len = requestedRespLevels[i].length(),
+           pl_len = requestedProbLevels[i].length(),
+           bl_len = requestedRelLevels[i].length(),
+           gl_len = requestedGenRelLevels[i].length();
+
+    // ----------------------------------------------------------------------
+    // Preliminaries: define finite subset, sort (if needed), and bin samples
+    // ----------------------------------------------------------------------
+    num_samp = 0;
+    if (pl_len || gl_len) { // sort samples array for p/beta* -> z mappings
+      sorted_samples.clear(); sorted_samples.reserve(num_obs);
+      for (it=samples.begin(); it!=samples.end(); ++it) {
+	const Real& sample = it->second.function_value(i);
+	if (isfinite(sample))
+	  { ++num_samp; sorted_samples.push_back(sample); }
+      }
+      // sort in ascending order
+      std::sort(sorted_samples.begin(), sorted_samples.end());
+      // NOTE(review): assumes at least one finite sample; num_samp == 0 would
+      // underflow the max-index access below -- confirm upstream guarantees.
+      if (pdfOutput)
+	{ min = sorted_samples[0]; max = sorted_samples[num_samp-1]; }
+      // in case of rl_len mixed with pl_len/gl_len, bin using sorted array.
+      // Note: all bins open on right end due to use of less than.
+      if (rl_len && respLevelTarget != RELIABILITIES) {
+	const RealVector& req_rl_i = requestedRespLevels[i];
+        bins.assign(rl_len+1, 0); size_t samp_cntr = 0;
+	for (j=0; j<rl_len; ++j)
+	  while (samp_cntr<num_samp && sorted_samples[samp_cntr]<req_rl_i[j])
+	    { ++bins[j]; ++samp_cntr; }
+	if (num_samp > samp_cntr)
+	  bins[rl_len] += num_samp - samp_cntr;
+      }
+    }
+    else if (rl_len && respLevelTarget != RELIABILITIES) {
+      // in case of rl_len without pl_len/gl_len, bin from original sample set
+      const RealVector& req_rl_i = requestedRespLevels[i];
+      bins.assign(rl_len+1, 0); min = DBL_MAX; max = -DBL_MAX;
+      for (it=samples.begin(); it!=samples.end(); ++it) {
+	const Real& sample = it->second.function_value(i);
+	if (isfinite(sample)) {
+	  ++num_samp;
+	  if (pdfOutput) {
+	    if (sample < min) min = sample;
+	    if (sample > max) max = sample;
+	  }
+	  // 1st PDF bin from -inf to 1st resp lev; last PDF bin from last resp
+	  // lev to +inf. Note: all bins open on right end due to use of <.
+	  bool found = false;
+	  for (k=0; k<rl_len; ++k)
+	    if (sample < req_rl_i[k])
+	      { ++bins[k]; found = true; break; }
+	  if (!found)
+	    ++bins[rl_len];
+	}
+      }
+    }
+
+    // ----------------
+    // Process mappings
+    // ----------------
+    if (rl_len) {
+      switch (respLevelTarget) {
+      case PROBABILITIES: case GEN_RELIABILITIES: {
+	// z -> p/beta* (based on binning)
+	bin_accumulator = 0;
+	for (j=0; j<rl_len; ++j) { // compute CDF/CCDF p/beta*
+	  bin_accumulator += bins[j];
+	  Real cdf_prob = (Real)bin_accumulator/(Real)num_samp;
+	  Real computed_prob = (cdfFlag) ? cdf_prob : 1. - cdf_prob;
+	  if (respLevelTarget == PROBABILITIES)
+	    computedProbLevels[i][j]   =  computed_prob;
+	  else
+	    computedGenRelLevels[i][j] = -Pecos::Phi_inverse(computed_prob);
+	}
+	break;
+      }
+      case RELIABILITIES: { // z -> beta (based on moment projection)
+	Real& mean = momentStats(0,i); Real& std_dev = momentStats(1,i);
+	for (j=0; j<rl_len; j++) {
+	  const Real& z = requestedRespLevels[i][j];
+	  if (std_dev > Pecos::SMALL_NUMBER) {
+	    Real ratio = (mean - z)/std_dev;
+	    computedRelLevels[i][j] = (cdfFlag) ? ratio : -ratio;
+	  }
+	  else
+	    computedRelLevels[i][j]
+	      = ( (cdfFlag && mean <= z) || (!cdfFlag && mean > z) )
+	      ? -Pecos::LARGE_NUMBER : Pecos::LARGE_NUMBER;
+	}
+	break;
+      }
+      }
+    }
+    for (j=0; j<pl_len+gl_len; j++) { // p/beta* -> z
+      Real p = (j<pl_len) ? requestedProbLevels[i][j] :
+	Pecos::Phi(-requestedGenRelLevels[i][j-pl_len]);
+      // since each sample has 1/N probability, a probability level can be
+      // directly converted to an index within a sorted array (index =~ p * N)
+      Real cdf_p_x_obs = (cdfFlag) ? p*(Real)num_samp : (1.-p)*(Real)num_samp;
+      // convert to an int and round down using std::floor().  Apply a small
+      // numerical adjustment so that probabilities on the boundaries
+      // (common with round probabilities and factor of 10 samples)
+      // are consistently rounded down (consistent with CDF p(g<=z)).
+      Real order = (cdf_p_x_obs > .9)
+	         ? std::pow(10., ceil(std::log10(cdf_p_x_obs))) : 0.;
+      int index = (int)std::floor(cdf_p_x_obs - order*DBL_EPSILON);
+      // clip at array ends due to possible roundoff effects
+      // NOTE(review): int vs size_t comparison below assumes num_samp fits
+      // in a signed int -- confirm sample counts stay below INT_MAX.
+      if (index < 0)         index = 0;
+      if (index >= num_samp) index = num_samp - 1;
+      if (j<pl_len)
+	computedRespLevels[i][j] = sorted_samples[index];
+      else
+	computedRespLevels[i][j+bl_len] = sorted_samples[index];
+    }
+    if (bl_len) {
+      Real& mean = momentStats(0,i); Real& std_dev = momentStats(1,i);
+      for (j=0; j<bl_len; j++) { // beta -> z
+	const Real& beta = requestedRelLevels[i][j];
+	computedRespLevels[i][j+pl_len] = (cdfFlag) ?
+	  mean - beta * std_dev : mean + beta * std_dev;
+      }
+    }
+
+    // archive the mappings from response levels
+    archive_from_resp(i);
+    // archive the mappings to response levels
+    archive_to_resp(i);
+
+    // ---------------------------------------------------------------------
+    // Post-process for PDF incorporating all requested/computed resp levels
+    // ---------------------------------------------------------------------
+    if (pdfOutput) {
+      size_t req_comp_rl_len = pl_len + gl_len;
+      if (respLevelTarget != RELIABILITIES) req_comp_rl_len += rl_len;
+      if (req_comp_rl_len) {
+	RealVector pdf_all_rlevs;
+	if (pl_len || gl_len) {
+	  // merge all requested & computed rlevs into pdf rlevs and sort
+	  pdf_all_rlevs.sizeUninitialized(req_comp_rl_len);
+	  // merge requested/computed --> pdf_all_rlevs
+	  int offset = 0;
+	  if (rl_len && respLevelTarget != RELIABILITIES) {
+	    copy_data_partial(requestedRespLevels[i], pdf_all_rlevs, 0);
+	    offset += rl_len;
+	  }
+	  if (pl_len) {
+	    copy_data_partial(computedRespLevels[i], 0, (int)pl_len,
+			      pdf_all_rlevs, offset);
+	    offset += pl_len;
+	  }
+	  if (gl_len)
+	    copy_data_partial(computedRespLevels[i], (int)(pl_len+bl_len),
+			      (int)gl_len, pdf_all_rlevs, offset);
+	  // sort combined array; retain unique entries; update req_comp_rl_len
+	  Real* start = pdf_all_rlevs.values();
+	  std::sort(start, start+req_comp_rl_len);
+	  req_comp_rl_len = std::distance(start,
+	    std::unique(start, start+req_comp_rl_len));
+	  // (re)compute bins from sorted_samples.  Note that these bins are
+	  // open on right end due to use of strictly less than.
+	  bins.assign(req_comp_rl_len+1, 0); size_t samp_cntr = 0;
+	  for (j=0; j<req_comp_rl_len; ++j)
+	    while (samp_cntr < num_samp &&
+		   sorted_samples[samp_cntr] < pdf_all_rlevs[j])
+	      { ++bins[j]; ++samp_cntr; }
+	  if (num_samp > samp_cntr)
+	    bins[req_comp_rl_len] += num_samp - samp_cntr;
+	}
+	RealVector& pdf_rlevs = (pl_len || gl_len) ?
+	  pdf_all_rlevs : requestedRespLevels[i];
+	size_t last_rl_index = req_comp_rl_len-1;
+	const Real& lev_0    = pdf_rlevs[0];
+	const Real& lev_last = pdf_rlevs[last_rl_index];
+	// to properly sum to 1, final PDF bin must be closed on right end.
+	// --> where the max sample value defines the last response level,
+	//     move any max samples on right boundary inside last PDF bin.
+	if (max <= lev_last && bins[req_comp_rl_len]) {
+	  bins[req_comp_rl_len-1] += bins[req_comp_rl_len];
+	  bins[req_comp_rl_len]    = 0;
+	}
+
+	// compute computedPDF{Abscissas,Ordinates} from bin counts and widths
+	size_t pdf_size = last_rl_index;
+	if (min < lev_0)    ++pdf_size;
+	if (max > lev_last) ++pdf_size;
+	RealVector& abs_i = computedPDFAbscissas[i]; abs_i.resize(pdf_size+1);
+	RealVector& ord_i = computedPDFOrdinates[i]; ord_i.resize(pdf_size);
+	size_t offset = 0;
+	if (min < lev_0) {
+	  abs_i[0] = min;
+	  ord_i[0] = (Real)bins[0]/(Real)num_samp/(lev_0 - min);
+	  offset = 1;
+	}
+	for (j=0; j<last_rl_index; ++j) {
+	  abs_i[j+offset] = pdf_rlevs[j];
+	  ord_i[j+offset]
+	    = (Real)bins[j+1]/(Real)num_samp/(pdf_rlevs[j+1] - pdf_rlevs[j]);
+	}
+	if (max > lev_last) {
+	  abs_i[pdf_size-1] = pdf_rlevs[last_rl_index];
+	  abs_i[pdf_size]   = max;
+	  ord_i[pdf_size-1]
+	    = (Real)bins[req_comp_rl_len]/(Real)num_samp/(max - lev_last);
+	}
+	else
+	  abs_i[pdf_size] = pdf_rlevs[last_rl_index];
+      }
+      archive_pdf(i);
+    }
+  }
+}
+
+
+// Populate finalStatistics from the computed results: in the epistemic case,
+// pack the per-response min/max pairs from extremeValues; otherwise delegate
+// to the base class, which handles moments plus level mappings.
+void NonDSampling::update_final_statistics()
+{
+  //if (finalStatistics.is_null())
+  //  initialize_final_statistics();
+
+  if (epistemicStats) {
+    size_t i, cntr = 0;
+    for (i=0; i<numFunctions; ++i) {
+      // interleave min (row 0) and max (row 1) for each response function
+      finalStatistics.function_value(extremeValues(0, i), cntr++);
+      finalStatistics.function_value(extremeValues(1, i), cntr++);
+    }
+  }
+  else // moments + level mappings
+    NonD::update_final_statistics();
+}
+
+
+// Print the full statistics report to stream s: intervals only (epistemic
+// case) or moments plus any requested level/PDF/system mappings; variable
+// label views are assembled for correlation output, which is currently
+// disabled (see commented call at the end).
+void NonDSampling::print_statistics(std::ostream& s) const
+{
+  if (epistemicStats) // output only min & max values in the epistemic case
+    print_intervals(s);
+  else {
+    print_moments(s);
+    if (totalLevelRequests) {
+      print_distribution_mappings(s);
+      if (pdfOutput)
+	print_pdf_mappings(s);
+      print_system_mappings(s);
+    }
+  }
+  if (!subIteratorFlag) {
+    StringMultiArrayConstView
+      acv_labels  = iteratedModel.all_continuous_variable_labels(),
+      adiv_labels = iteratedModel.all_discrete_int_variable_labels(),
+      adsv_labels = iteratedModel.all_discrete_string_variable_labels(),
+      adrv_labels = iteratedModel.all_discrete_real_variable_labels();
+    size_t cv_start, num_cv, div_start, num_div, dsv_start, num_dsv,
+      drv_start, num_drv;
+    mode_counts(iteratedModel, cv_start, num_cv, div_start, num_div,
+		dsv_start, num_dsv, drv_start, num_drv);
+    // restrict the all-variables label views to the active sampling ranges
+    StringMultiArrayConstView
+      cv_labels  =
+        acv_labels[boost::indices[idx_range(cv_start, cv_start+num_cv)]],
+      div_labels =
+        adiv_labels[boost::indices[idx_range(div_start, div_start+num_div)]],
+      dsv_labels =
+        adsv_labels[boost::indices[idx_range(dsv_start, dsv_start+num_dsv)]],
+      drv_labels =
+        adrv_labels[boost::indices[idx_range(drv_start, drv_start+num_drv)]];
+    // Don't output for now
+    // nonDSampCorr.print_correlations(s, cv_labels, div_labels, dsv_labels,
+				//     drv_labels,iteratedModel.response_labels());
+  }
+}
+
+
+// Print the observed min and max value of each response function (rows 0 and
+// 1 of extremeValues) in scientific notation; used for epistemic statistics.
+void NonDSampling::print_intervals(std::ostream& s) const
+{
+  const StringArray& resp_labels = iteratedModel.response_labels();
+
+  s << std::scientific << std::setprecision(write_precision)
+    << "\nMin and Max values for each response function:\n";
+  for (size_t i=0; i<numFunctions; ++i)
+    s << resp_labels[i] << ":  Min = " << extremeValues(0, i)
+      << "  Max = " << extremeValues(1, i) << '\n';
+}
+
+
+// Print a table of the first four moments (mean, std dev, skewness,
+// kurtosis) per response function from momentStats, followed by the 95%
+// confidence intervals from momentCIs when more than one sample is
+// available (CIs are undefined for a single sample).
+void NonDSampling::print_moments(std::ostream& s) const
+{
+  const StringArray& resp_labels = iteratedModel.response_labels();
+
+  s << std::scientific << std::setprecision(write_precision);
+
+  size_t i, j, width = write_precision+7;
+
+  s << "\nMoment-based statistics for each response function:\n"
+    << std::setw(width+15) << "Mean"     << std::setw(width+1) << "Std Dev"
+    << std::setw(width+1)  << "Skewness" << std::setw(width+2) << "Kurtosis\n";
+  //<< std::setw(width+2)  << "Coeff of Var\n";
+  for (i=0; i<numFunctions; ++i) {
+    const Real* moments_i = momentStats[i];
+    s << std::setw(14) << resp_labels[i];
+    for (j=0; j<4; ++j)
+      s << ' ' << std::setw(width) << moments_i[j];
+    s << '\n';
+  }
+  if (numSamples > 1) {
+    // output 95% confidence intervals as (,) interval
+    s << "\n95% confidence intervals for each response function:\n"
+      << std::setw(width+15) << "LowerCI_Mean" << std::setw(width+1)
+      << "UpperCI_Mean" << std::setw(width+1)  << "LowerCI_StdDev"
+      << std::setw(width+2) << "UpperCI_StdDev\n";
+    for (i=0; i<numFunctions; ++i)
+      s << std::setw(14) << resp_labels[i]
+	<< ' ' << std::setw(width) << momentCIs(0, i)
+	<< ' ' << std::setw(width) << momentCIs(1, i)
+	<< ' ' << std::setw(width) << momentCIs(2, i)
+	<< ' ' << std::setw(width) << momentCIs(3, i) << '\n';
+  }
+}
+
+
+// Print the PDF histogram (bin lower/upper bounds and density value) for
+// each response function that has requested or computed response levels,
+// using the computedPDFAbscissas/Ordinates filled in by
+// compute_distribution_mappings().
+void NonDSampling::print_pdf_mappings(std::ostream& s) const
+{
+  const StringArray& resp_labels = iteratedModel.response_labels();
+
+  // output CDF/CCDF probabilities resulting from binning or CDF/CCDF
+  // reliabilities resulting from number of std devs separating mean & target
+  s << std::scientific << std::setprecision(write_precision)
+    << "\nProbability Density Function (PDF) histograms for each response "
+    << "function:\n";
+  size_t i, j, width = write_precision+7;
+  for (i=0; i<numFunctions; ++i) {
+    if (!requestedRespLevels[i].empty() || !computedRespLevels[i].empty()) {
+      s << "PDF for " << resp_labels[i] << ":\n"
+	<< "          Bin Lower          Bin Upper      Density Value\n"
+	<< "          ---------          ---------      -------------\n";
+
+      // abscissas hold one more entry than ordinates (bin edges vs densities)
+      size_t pdf_len = computedPDFOrdinates[i].length();
+      for (j=0; j<pdf_len; ++j)
+	s << "  " << std::setw(width) << computedPDFAbscissas[i][j] << "  "
+	  << std::setw(width) << computedPDFAbscissas[i][j+1] << "  "
+	  << std::setw(width) << computedPDFOrdinates[i][j] << '\n';
+    }
+  }
+}
+
+
+// Pre-allocate a per-response-function array of PDF histogram matrices in
+// the results database; no-op when the results DB is inactive.
+void NonDSampling::archive_allocate_pdf() // const
+{
+  if (!resultsDB.active())  return;
+
+  // pdf per function, possibly empty
+  MetaDataType md;
+  md["Array Spans"] = make_metadatavalue("Response Functions");
+  md["Row Labels"] =
+    make_metadatavalue("Bin Lower", "Bin Upper", "Density Value");
+  resultsDB.array_allocate<RealMatrix>
+    (run_identifier(), resultsNames.pdf_histograms, numFunctions, md);
+}
+
+
+// Archive the PDF histogram for response function i into the results
+// database as a 3 x num_bins matrix (rows: bin lower edge, bin upper edge,
+// density value); no-op when the results DB is inactive.
+void NonDSampling::archive_pdf(size_t i) // const
+{
+  if (!resultsDB.active()) return;
+
+  size_t pdf_len = computedPDFOrdinates[i].length();
+  RealMatrix pdf(3, pdf_len);
+  for (size_t j=0; j<pdf_len; ++j) {
+    pdf(0, j) = computedPDFAbscissas[i][j];
+    pdf(1, j) = computedPDFAbscissas[i][j+1];
+    pdf(2, j) = computedPDFOrdinates[i][j];
+  }
+
+  resultsDB.array_insert<RealMatrix>
+    (run_identifier(), resultsNames.pdf_histograms, i, pdf);
+}
+
+} // namespace Dakota
+
Index: /issm/trunk/externalpackages/dakota/install-6.2-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/install-6.2-linux-static.sh	(revision 24686)
@@ -0,0 +1,76 @@
+#!/bin/bash
+set -eu
+
+# Builds and installs Dakota 6.2 with static libraries for Linux.
+#
+# Usage: ./install-6.2-linux-static.sh [num_build_jobs]
+#   num_build_jobs - optional; forwarded to 'make -j' (serial build if omitted)
+#
+# Requires ISSM_DIR and MPI_HOME to be set in the environment, with PETSc
+# (for BLAS/LAPACK) and Boost installed under ${ISSM_DIR}/externalpackages.
+
+# Constants
+#
+DAK_ROOT=${ISSM_DIR}/externalpackages/dakota
+VER="6.2"
+
+## Environment
+#
+export BLAS_LIBS="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lfblas -L/usr/lib/x86_64-linux-gnu -lgfortran" # Need to export BLAS_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+export BOOST_ROOT=${ISSM_DIR}/externalpackages/boost/install
+export CXXFLAGS='-std=c++98' # Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost (absolutely necessary for this version)
+#export CXXFLAGS='-std=c++11'
+export DAK_BUILD=${DAK_ROOT}/build
+export DAK_INSTALL=${DAK_ROOT}/install
+export DAK_SRC=${DAK_ROOT}/src
+export LAPACK_LIBS="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lflapack -L/usr/lib/x86_64-linux-gnu -lgfortran" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+
+# Cleanup
+rm -rf build install src
+mkdir build install src
+
+# Download source from ISSM server
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/dakota-${VER}-public.src.tar.gz" "dakota-${VER}-public-src.tar.gz"
+
+# Unpack source
+tar -zxvf dakota-${VER}-public-src.tar.gz
+
+# Move source to 'src' directory
+mv dakota-${VER}.0.src/* src
+rm -rf dakota-${VER}.0.src
+
+# Copy customized source and configuration files to 'src' directory
+cp configs/${VER}/packages/DDACE/src/Analyzer/MainEffectsExcelOutput.cpp ${DAK_SRC}/packages/DDACE/src/Analyzer
+cp configs/${VER}/packages/surfpack/src/surfaces/nkm/NKM_KrigingModel.cpp ${DAK_SRC}/packages/surfpack/src/surfaces/nkm
+cp configs/${VER}/src/DakotaInterface.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDLocalReliability.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDSampling.cpp ${DAK_SRC}/src
+
+# Copy customized source and configuration files specific to Linux to 'src' directory
+cp configs/${VER}/linux/cmake/BuildDakotaCustom.cmake ${DAK_SRC}/cmake
+cp configs/${VER}/linux/cmake/DakotaDev.cmake ${DAK_SRC}/cmake
+
+# Configure
+cd ${DAK_BUILD}
+cmake \
+	-DBUILD_SHARED_LIBS=OFF \
+	-DBUILD_STATIC_LIBS=ON \
+	-DCMAKE_C_COMPILER=${MPI_HOME}/bin/mpicc \
+	-DCMAKE_CXX_COMPILER=${MPI_HOME}/bin/mpicxx \
+	-DCMAKE_Fortran_COMPILER=${MPI_HOME}/bin/mpif77 \
+	-DCMAKE_C_FLAGS="-fPIC" \
+	-DCMAKE_CXX_FLAGS="-fPIC" \
+	-DCMAKE_Fortran_FLAGS="-fPIC" \
+	-DHAVE_ACRO=OFF \
+	-DHAVE_JEGA=OFF \
+	-C${DAK_SRC}/cmake/BuildDakotaCustom.cmake \
+	-C${DAK_SRC}/cmake/DakotaDev.cmake \
+	${DAK_SRC}
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+cd ${DAK_INSTALL}
+
+# Comment out definition of HAVE_MPI in Teuchos config header file in order to
+# avoid conflict with our definition
+sed -i -e "s/#define HAVE_MPI/\/* #define HAVE_MPI *\//g" include/Teuchos_config.h
Index: /issm/trunk/externalpackages/dakota/install-6.2-linux.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/install-6.2-linux.sh	(revision 24686)
@@ -0,0 +1,73 @@
+#!/bin/bash
+set -eu
+
+# Builds and installs Dakota 6.2 with shared libraries for Linux.
+#
+# Usage: ./install-6.2-linux.sh [num_build_jobs]
+#   num_build_jobs - optional; forwarded to 'make -j' (serial build if omitted)
+#
+# Requires ISSM_DIR and MPI_HOME to be set in the environment, with PETSc
+# (for BLAS/LAPACK) and Boost installed under ${ISSM_DIR}/externalpackages.
+
+# Constants
+#
+DAK_ROOT=${ISSM_DIR}/externalpackages/dakota
+VER="6.2"
+
+## Environment
+#
+export BLAS_LIBS="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lfblas -L/usr/lib/x86_64-linux-gnu -lgfortran" # Need to export BLAS_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+export BOOST_ROOT=${ISSM_DIR}/externalpackages/boost/install
+export CXXFLAGS='-std=c++98' # Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost (absolutely necessary for this version)
+#export CXXFLAGS='-std=c++11'
+export DAK_BUILD=${DAK_ROOT}/build
+export DAK_INSTALL=${DAK_ROOT}/install
+export DAK_SRC=${DAK_ROOT}/src
+export LAPACK_LIBS="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lflapack -L/usr/lib/x86_64-linux-gnu -lgfortran" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+
+# Cleanup
+rm -rf build install src
+mkdir build install src
+
+# Download source
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/dakota-${VER}-public.src.tar.gz" "dakota-${VER}-public-src.tar.gz"
+
+# Unpack source
+tar -zxvf dakota-${VER}-public-src.tar.gz
+
+# Move source to 'src' directory
+mv dakota-${VER}.0.src/* src
+rm -rf dakota-${VER}.0.src
+
+# Copy customized source and configuration files to 'src' directory
+cp configs/${VER}/packages/DDACE/src/Analyzer/MainEffectsExcelOutput.cpp ${DAK_SRC}/packages/DDACE/src/Analyzer
+cp configs/${VER}/packages/surfpack/src/surfaces/nkm/NKM_KrigingModel.cpp ${DAK_SRC}/packages/surfpack/src/surfaces/nkm
+cp configs/${VER}/src/DakotaInterface.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDLocalReliability.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDSampling.cpp ${DAK_SRC}/src
+
+# Copy customized source and configuration files specific to Linux to 'src' directory
+cp configs/${VER}/linux/cmake/BuildDakotaCustom.cmake ${DAK_SRC}/cmake
+cp configs/${VER}/linux/cmake/DakotaDev.cmake ${DAK_SRC}/cmake
+
+# Configure
+cd ${DAK_BUILD}
+cmake \
+	-DBUILD_SHARED_LIBS=ON \
+	-DBUILD_STATIC_LIBS=OFF \
+	-DCMAKE_C_COMPILER=${MPI_HOME}/bin/mpicc \
+	-DCMAKE_CXX_COMPILER=${MPI_HOME}/bin/mpicxx \
+	-DCMAKE_Fortran_COMPILER=${MPI_HOME}/bin/mpif77 \
+	-DHAVE_ACRO=OFF \
+	-DHAVE_JEGA=OFF \
+	-C${DAK_SRC}/cmake/BuildDakotaCustom.cmake \
+	-C${DAK_SRC}/cmake/DakotaDev.cmake \
+	${DAK_SRC}
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+cd ${DAK_INSTALL}
+
+# Comment out definition of HAVE_MPI in Teuchos config header file in order to
+# avoid conflict with our definition
+sed -i -e "s/#define HAVE_MPI/\/* #define HAVE_MPI *\//g" include/Teuchos_config.h
Index: /issm/trunk/externalpackages/dakota/install-6.2-linux64-static.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-linux64-static.sh	(revision 24685)
+++ 	(revision )
@@ -1,81 +1,0 @@
-#!/bin/bash
-set -eu
-
-# Constants
-DAK_VER="6.2"
-
-#Some cleanup
-rm -rf Dakota
-rm -rf src
-rm -rf build
-rm -rf install
-mkdir src build install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh https://issm.ess.uci.edu/files/externalpackages/dakota-${DAK_VER}-public.src.tar.gz dakota-${DAK_VER}-public-src.tar.gz
-
-#Untar
-tar -zxvf dakota-${DAK_VER}-public-src.tar.gz
-
-#Move Dakota to src directory
-mv dakota-${DAK_VER}.0.src/* src
-rm -rf dakota-${DAK_VER}.0.src
-
-#Set up Dakota cmake variables and config
-DAK_PATH=$ISSM_DIR/externalpackages/dakota
-
-export DAK_BUILD=$DAK_PATH/build
-export DAK_INSTALL=$DAK_PATH/install
-export DAK_SRC=$DAK_PATH/src
-export MPIHOME=$ISSM_DIR/externalpackages/mpich/install
-export BOOST_ROOT=$ISSM_DIR/externalpackages/boost/install
-
-cp $DAK_SRC/cmake/BuildDakotaTemplate.cmake $DAK_SRC/cmake/BuildDakotaCustom.cmake
-patch $DAK_SRC/cmake/BuildDakotaCustom.cmake configs/${DAK_VER}/BuildDakotaCustom.cmake.patch
-patch $DAK_SRC/cmake/DakotaDev.cmake configs/${DAK_VER}/DakotaDev.cmake.patch
-patch $DAK_SRC/CMakeLists.txt configs/${DAK_VER}/CMakeLists.txt.petsclibs.patch
-
-#Apply patches
-patch src/src/NonDSampling.cpp configs/${DAK_VER}/NonDSampling.cpp.patch
-patch src/src/NonDLocalReliability.cpp configs/${DAK_VER}/NonDLocalReliability.cpp.patch
-patch src/packages/pecos/src/pecos_global_defs.hpp configs/${DAK_VER}/pecos_global_defs.hpp.patch
-
-#Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
-export CXXFLAGS='-std=c++98'
-
-#Configure dakota
-cd $DAK_BUILD
-
-cmake -C$DAK_SRC/cmake/BuildDakotaCustom.cmake \
-		-C$DAK_SRC/cmake/DakotaDev.cmake \
-		-DBUILD_STATIC_LIBS=ON \
-		-DBUILD_SHARED_LIBS=OFF \
-		-DBOOST_ROOT=$BOOST_ROOT \
-		-DBoost_LIBRARY_DIRS=$BOOST_ROOT/lib \
-		-DBoost_NO_BOOST_CMAKE=TRUE \
-		-DBoost_NO_SYSTEM_PATHS=TRUE \
-		-DBoost_INSTALL_PREFIX=$DAK_INSTALL \
-		-DCMAKE_C_FLAGS="-O2 -g -fPIC" \
-		-DCMAKE_CXX_FLAGS="-O2 -g -fPIC" \
-		-DCMAKE_Fortran_FLAGS="-O2 -g -fPIC" \
-		-DCMAKE_C_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicc \
-		-DCMAKE_CXX_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicxx \
-		-DCMAKE_Fortran_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpif77 \
-		-DHAVE_ACRO=off \
-		-DHAVE_JEGA=off \
-		$DAK_SRC
-cd ..
-
-#Compile and install dakota
-cd $DAK_BUILD
-
-if [ $# -eq 0 ];
-then
-	make
-	make install
-else
-	make -j $1
-	make -j $1 install
-fi
-
-cd ..
Index: /issm/trunk/externalpackages/dakota/install-6.2-linux64.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-linux64.sh	(revision 24685)
+++ 	(revision )
@@ -1,61 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf Dakota
-rm -rf src 
-rm -rf build 
-rm -rf install 
-mkdir src build install 
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/dakota-6.2-public.src.tar.gz' 'dakota-6.2-public-src.tar.gz'
-
-#Untar 
-tar -zxvf dakota-6.2-public-src.tar.gz
-
-#Move Dakota to src directory
-mv dakota-6.2.0.src/* src
-rm -rf dakota-6.2.0.src
-
-#Set up Dakota cmake variables and config
-export DAK_SRC=$ISSM_DIR/externalpackages/dakota/src
-export DAK_BUILD=$ISSM_DIR/externalpackages/dakota/build
-export MPIHOME=$ISSM_DIR/externalpackages/mpich/install
-cp $DAK_SRC/cmake/BuildDakotaTemplate.cmake $DAK_SRC/cmake/BuildDakotaCustom.cmake
-patch $DAK_SRC/cmake/BuildDakotaCustom.cmake configs/6.2/BuildDakotaCustom.cmake.patch
-patch $DAK_SRC/cmake/DakotaDev.cmake configs/6.2/DakotaDev.cmake.patch
-patch $DAK_SRC/CMakeLists.txt configs/6.2/CMakeLists.txt.petsclibs.patch
-
-#Apply patches
-patch src/src/NonDSampling.cpp configs/6.2/NonDSampling.cpp.patch
-patch src/src/NonDLocalReliability.cpp configs/6.2/NonDLocalReliability.cpp.patch
-patch src/packages/pecos/src/pecos_global_defs.hpp configs/6.2/pecos_global_defs.hpp.patch
-
-#Setting CXXFLAGS to deal with C++11 incompatibility with Matlab's Boost
-export CXXFLAGS='-std=c++98'
-
-#Configure dakota
-cd $DAK_BUILD
-
-cmake -D CMAKE_C_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicc \
-	   -D CMAKE_CXX_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpicxx \
-	   -D CMAKE_Fortran_COMPILER=$ISSM_DIR/externalpackages/mpich/install/bin/mpif77 \
-		-DHAVE_ACRO=off \
-		-DHAVE_JEGA=off \
-		-C $DAK_SRC/cmake/BuildDakotaCustom.cmake \
-		-C $DAK_SRC/cmake/DakotaDev.cmake \
-		$DAK_SRC
-cd ..
-
-#Compile and install dakota
-cd $DAK_BUILD
-if [ $# -eq 0 ];
-then
-	make
-	make install
-else
-	make -j $1
-	make -j $1 install
-fi
-cd ..
Index: /issm/trunk/externalpackages/dakota/install-6.2-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-mac-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/install-6.2-mac-static.sh	(revision 24686)
@@ -0,0 +1,76 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+DAK_ROOT=${ISSM_DIR}/externalpackages/dakota
+VER="6.2"
+
+## Environment
+#
+export BLAS_LIBS="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lfblas /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/libgfortran.a /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/libquadmath.a /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/gcc/x86_64-apple-darwin15/9.3.0/libgcc.a" # Need to export BLAS_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+export BOOST_ROOT=${ISSM_DIR}/externalpackages/boost/install
+export DAK_BUILD=${DAK_ROOT}/build
+export DAK_INSTALL=${DAK_ROOT}/install
+export DAK_SRC=${DAK_ROOT}/src
+export LAPACK_LIBS="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lflapack /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/libgfortran.a /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/libquadmath.a /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/gcc/x86_64-apple-darwin15/9.3.0/libgcc.a" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+
+# Cleanup
+rm -rf build install src
+mkdir build install src
+
+# Download source
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/dakota-${VER}-public.src.tar.gz" "dakota-${VER}-public-src.tar.gz"
+
+# Unpack source
+tar -zxvf dakota-${VER}-public-src.tar.gz
+
+# Move source to 'src' directory
+mv dakota-${VER}.0.src/* src
+rm -rf dakota-${VER}.0.src
+
+# Copy customized source and configuration files to 'src' directory
+cp configs/${VER}/packages/DDACE/src/Analyzer/MainEffectsExcelOutput.cpp ${DAK_SRC}/packages/DDACE/src/Analyzer
+cp configs/${VER}/packages/surfpack/src/surfaces/nkm/NKM_KrigingModel.cpp ${DAK_SRC}/packages/surfpack/src/surfaces/nkm
+cp configs/${VER}/src/DakotaInterface.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDLocalReliability.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDSampling.cpp ${DAK_SRC}/src
+
+# Copy customized source and configuration files specific to Mac to 'src' directory
+cp configs/${VER}/mac/cmake/BuildDakotaCustom.cmake ${DAK_SRC}/cmake
+cp configs/${VER}/mac/cmake/DakotaDev.cmake ${DAK_SRC}/cmake
+cp configs/${VER}/mac/packages/VPISparseGrid/src/sandia_rules.cpp ${DAK_SRC}/packages/VPISparseGrid/src
+
+# Configure
+cd ${DAK_BUILD}
+cmake \
+	-DBUILD_SHARED_LIBS=OFF \
+	-DBUILD_STATIC_LIBS=ON \
+	-DCMAKE_C_COMPILER=${MPI_HOME}/bin/mpicc \
+	-DCMAKE_CXX_COMPILER=${MPI_HOME}/bin/mpicxx \
+	-DCMAKE_Fortran_COMPILER=${MPI_HOME}/bin/mpif77 \
+	-DCMAKE_C_FLAGS="-fPIC" \
+	-DCMAKE_CXX_FLAGS="-fPIC -fdelayed-template-parsing" \
+	-DCMAKE_Fortran_FLAGS="-fPIC" \
+	-DBoost_NO_BOOST_CMAKE=TRUE \
+	-DHAVE_ACRO=OFF \
+	-DHAVE_JEGA=OFF \
+	-C${DAK_SRC}/cmake/BuildDakotaCustom.cmake \
+	-C${DAK_SRC}/cmake/DakotaDev.cmake \
+	${DAK_SRC}
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+cd ${DAK_INSTALL}
+
+# Comment out definition of HAVE_MPI in Teuchos config header file in order to
+# avoid conflict with our definition
+sed -i -e "s/#define HAVE_MPI/\/* #define HAVE_MPI *\//g" include/Teuchos_config.h
Index: /issm/trunk/externalpackages/dakota/install-6.2-mac.sh
===================================================================
--- /issm/trunk/externalpackages/dakota/install-6.2-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/dakota/install-6.2-mac.sh	(revision 24686)
@@ -0,0 +1,103 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+DAK_ROOT=${ISSM_DIR}/externalpackages/dakota
+VER="6.2"
+
+## Environment
+#
+export BLAS_LIBS="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lfblas -L/usr/local/Cellar/gcc/9.3.0/lib/gcc/9 -lgfortran" # Need to export BLAS_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+export BOOST_ROOT=${ISSM_DIR}/externalpackages/boost/install
+export DAK_BUILD=${DAK_ROOT}/build
+export DAK_INSTALL=${DAK_ROOT}/install
+export DAK_SRC=${DAK_ROOT}/src
+export LAPACK_LIBS="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lflapack -L/usr/local/Cellar/gcc/9.3.0/lib/gcc/9 -lgfortran" # Need to export LAPACK_LIBS *and* pass it as an option to CMake to ensure that external packages also find it
+
+# Cleanup
+rm -rf build install src
+mkdir build install src
+
+# Download source
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/dakota-${VER}-public.src.tar.gz" "dakota-${VER}-public-src.tar.gz"
+
+# Unpack source
+tar -zxvf dakota-${VER}-public-src.tar.gz
+
+# Move source to 'src' directory
+mv dakota-${VER}.0.src/* src
+rm -rf dakota-${VER}.0.src
+
+# Copy customized source and configuration files to 'src' directory
+cp configs/${VER}/packages/DDACE/src/Analyzer/MainEffectsExcelOutput.cpp ${DAK_SRC}/packages/DDACE/src/Analyzer
+cp configs/${VER}/packages/surfpack/src/surfaces/nkm/NKM_KrigingModel.cpp ${DAK_SRC}/packages/surfpack/src/surfaces/nkm
+cp configs/${VER}/src/DakotaInterface.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDLocalReliability.cpp ${DAK_SRC}/src
+cp configs/${VER}/src/NonDSampling.cpp ${DAK_SRC}/src
+
+# Copy customized source and configuration files specific to Mac to 'src' directory
+cp configs/${VER}/mac/cmake/BuildDakotaCustom.cmake ${DAK_SRC}/cmake
+cp configs/${VER}/mac/cmake/DakotaDev.cmake ${DAK_SRC}/cmake
+cp configs/${VER}/mac/cmake/InstallDarwinDylibs.cmake ${DAK_SRC}/cmake
+cp configs/${VER}/mac/packages/VPISparseGrid/src/sandia_rules.cpp ${DAK_SRC}/packages/VPISparseGrid/src
+
+# Configure
+cd ${DAK_BUILD}
+cmake \
+	-DBUILD_SHARED_LIBS=ON \
+	-DBUILD_STATIC_LIBS=OFF \
+	-DCMAKE_C_COMPILER=${MPI_HOME}/bin/mpicc \
+	-DCMAKE_CXX_COMPILER=${MPI_HOME}/bin/mpicxx \
+	-DCMAKE_Fortran_COMPILER=${MPI_HOME}/bin/mpif77 \
+	-DCMAKE_CXX_FLAGS="-fdelayed-template-parsing" \
+	-DBoost_NO_BOOST_CMAKE=TRUE \
+	-DHAVE_ACRO=OFF \
+	-DHAVE_JEGA=OFF \
+	-C${DAK_SRC}/cmake/BuildDakotaCustom.cmake \
+	-C${DAK_SRC}/cmake/DakotaDev.cmake \
+	${DAK_SRC}
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+cd ${DAK_INSTALL}
+
+# Comment out definition of HAVE_MPI in Teuchos config header file in order to
+# avoid conflict with our definition
+sed -i -e "s/#define HAVE_MPI/\/* #define HAVE_MPI *\//g" include/Teuchos_config.h
+
+# Set install_name for all shared libraries
+cd ${DAK_INSTALL}/lib
+for name in *.dylib; do
+	install_name_tool -id ${DAK_INSTALL}/lib/${name} ${name}
+done
+
+## Patch install names for certain libraries
+#
+# TODO: Figure out how to reconfigure source to apply these install names at compile time
+#
+install_name_tool -change libdakota_src_fortran.dylib ${DAK_INSTALL}/lib/libdakota_src_fortran.dylib libdakota_src.dylib
+install_name_tool -change liblhs_mod.dylib ${DAK_INSTALL}/lib/liblhs_mod.dylib liblhs.dylib
+install_name_tool -change liblhs_mods.dylib ${DAK_INSTALL}/lib/liblhs_mods.dylib liblhs.dylib
+install_name_tool -change liblhs_mod.dylib ${DAK_INSTALL}/lib/liblhs_mod.dylib liblhs_mods.dylib
+install_name_tool -change libteuchos.dylib ${DAK_INSTALL}/lib/libteuchos.dylib liboptpp.dylib
+install_name_tool -change libdfftpack.dylib ${DAK_INSTALL}/lib/libdfftpack.dylib libpecos.dylib
+install_name_tool -change liblhs.dylib ${DAK_INSTALL}/lib/liblhs.dylib libpecos.dylib
+install_name_tool -change liblhs_mod.dylib ${DAK_INSTALL}/lib/liblhs_mod.dylib libpecos.dylib
+install_name_tool -change liblhs_mods.dylib ${DAK_INSTALL}/lib/liblhs_mods.dylib libpecos.dylib
+install_name_tool -change libpecos_src.dylib ${DAK_INSTALL}/lib/libpecos_src.dylib libpecos.dylib
+install_name_tool -change libteuchos.dylib ${DAK_INSTALL}/lib/libteuchos.dylib libpecos.dylib
+install_name_tool -change libdfftpack.dylib ${DAK_INSTALL}/lib/libdfftpack.dylib libpecos_src.dylib
+install_name_tool -change liblhs.dylib ${DAK_INSTALL}/lib/liblhs.dylib libpecos_src.dylib
+install_name_tool -change liblhs_mod.dylib ${DAK_INSTALL}/lib/liblhs_mod.dylib libpecos_src.dylib
+install_name_tool -change liblhs_mods.dylib ${DAK_INSTALL}/lib/liblhs_mods.dylib libpecos_src.dylib
+install_name_tool -change libteuchos.dylib ${DAK_INSTALL}/lib/libteuchos.dylib libpecos_src.dylib
+install_name_tool -change libsurfpack_fortran.dylib ${DAK_INSTALL}/lib/libsurfpack_fortran.dylib libsurfpack.dylib
Index: /issm/trunk/externalpackages/emscripten/install.sh
===================================================================
--- /issm/trunk/externalpackages/emscripten/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/emscripten/install.sh	(revision 24686)
@@ -3,26 +3,34 @@
 
 
+# TODO:
+# - Introduce build step to $ISSM_DIR/jenkins/jenkins.sh to compile Fortran code in $ISSM_DIR/src/c/modules/GiaDeflectionCorex/ to C with f2c
+#	- Then, revert $ISSM_DIR/externalpackages/emscripten/install.sh to r24306 and test clean build
+#	- When builtin support for Fortran is available, remove build step
+#
+
+VER="latest" # Set this to "latest", or last tag that works in case of failure
+
 # Get Emscripten SDK (emsdk) driver if we have not previously installed
 # Emscripten. Otherwise, just get the latest version.
-if [[ ! -d ./emsdk ]]; then
+if [[ ! -d ./install ]]; then
 	# Get the emsdk repo
 	git clone https://github.com/emscripten-core/emsdk.git
 
-	# Create symbolic link
-	ln -s ./emsdk ./install
+	# Move source to 'install' directory
+	mv ./emsdk ./install
 
-	cd ./emsdk
+	cd ./install
 else
 	# Fetch the latest version of the emsdk
-	cd ./emsdk
+	cd ./install
 	git pull
 fi
 
 # Download and install the latest SDK tools.
-./emsdk install latest
+./emsdk install $VER
 
 # Make the "latest" SDK "active" for the current user. (writes ~/.emscripten
 # file)
-./emsdk activate latest
+./emsdk activate $VER
 
 # Activate PATH and other environment variables in the current terminal
Index: /issm/trunk/externalpackages/export_fig/README
===================================================================
--- /issm/trunk/externalpackages/export_fig/README	(revision 24685)
+++ 	(revision )
@@ -1,17 +1,0 @@
-Downloaded from https://www.mathworks.com/matlabcentral/fileexchange/23629-export-fig
-
-We had to patch export_fig.m to force export_fig to use the renderers painter, otherwise it is just way to slow (opengl is the default if we have patches)
-
-Line  585:
-
-
-if ~options.renderer
-    if hasTransparency || hasPatches
-        % This is *MUCH* slower, but more accurate for patches and transparent annotations (issue #39)
-        renderer = '-opengl';
-    else
-        renderer = '-painters';
-    end
-end
-
-change to painters all the time
Index: /issm/trunk/externalpackages/export_fig/README.md
===================================================================
--- /issm/trunk/externalpackages/export_fig/README.md	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/README.md	(revision 24686)
@@ -10,4 +10,6 @@
 
 Perhaps the best way to demonstrate what export_fig can do is with some examples.
+
+*Note: `export_fig` currently supports only figures created with the `figure` function, or GUIDE. Figures created using `uifigure` or AppDesigner are only partially supported. See issues [#287](https://github.com/altmany/export_fig/issues/287), [#261](https://github.com/altmany/export_fig/issues/261) for details.*
   
 ### Examples
@@ -169,5 +171,5 @@
 ```
 
-**Specifying the figure/axes** - if you have mutiple figures open you can specify which figure to export using its handle:  
+**Specifying the figure/axes** - if you have multiple figures open you can specify which figure to export using its handle:  
 ```Matlab
 export_fig(figure_handle, filename);
@@ -212,7 +214,7 @@
 **Smoothed/interpolated images in output PDF** - if you produce a PDF using export_fig and images in the PDF look overly smoothed or interpolated, this is because the software you are using to view the PDF is smoothing or interpolating the image data. The image is not smoothed in the PDF file itself. If the software has an option to disable this feature, you should select it. Alternatively, use another PDF viewer that doesn't exhibit this problem.  
   
-**Locating Ghostscript/pdftops** - You may find a dialogue box appears when using export_fig, asking you to locate either [Ghostscript](http://www.ghostscript.com) or [pdftops](http://www.foolabs.com/xpdf). These are separate applications which export_fig requires to perform certain functions. If such a dialogue appears it is because export_fig can't find the application automatically. This is because you either haven't installed it, or it isn't in the normal place. Make sure you install the applications correctly first. They can be downloaded from the following places:  
+**Locating Ghostscript/pdftops** - You may find a dialogue box appears when using export_fig, asking you to locate either [Ghostscript](http://www.ghostscript.com) or [pdftops (part of the Xpdf package)](http://www.xpdfreader.com). These are separate applications which export_fig requires to perform certain functions. If such a dialogue appears it is because export_fig can't find the application automatically. This is because you either haven't installed it, or it isn't in the normal place. Make sure you install the applications correctly first. They can be downloaded from the following places:  
  1. Ghostscript:     [www.ghostscript.com](http://www.ghostscript.com)
- 2. pdftops (install the Xpdf package): [www.foolabs.com/xpdf](http://www.foolabs.com/xpdf)
+ 2. pdftops (install the Xpdf package): [www.xpdfreader.com](http://www.xpdfreader.com)
 
 If you choose to install them in a non-default location then point export_fig
Index: /issm/trunk/externalpackages/export_fig/append_pdfs.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/append_pdfs.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/append_pdfs.m	(revision 24686)
@@ -35,47 +35,90 @@
 % 26/02/15: If temp dir is not writable, use the output folder for temp
 %           files when appending (Javier Paredes); sanity check of inputs
+% 24/01/18: Fixed error in case of existing output file (append mode)
+% 24/01/18: Fixed issue #213: non-ASCII characters in folder names on Windows
+% 06/12/18: Avoid an "invalid escape-char" warning upon error
 
 function append_pdfs(varargin)
 
-if nargin < 2,  return;  end  % sanity check
+    if nargin < 2,  return;  end  % sanity check
 
-% Are we appending or creating a new file
-append = exist(varargin{1}, 'file') == 2;
-output = [tempname '.pdf'];
-try
-    % Ensure that the temp dir is writable (Javier Paredes 26/2/15)
-    fid = fopen(output,'w');
-    fwrite(fid,1);
-    fclose(fid);
-    delete(output);
-    isTempDirOk = true;
-catch
-    % Temp dir is not writable, so use the output folder
-    [dummy,fname,fext] = fileparts(output); %#ok<ASGLU>
-    fpath = fileparts(varargin{1});
-    output = fullfile(fpath,[fname fext]);
-    isTempDirOk = false;
+    % Are we appending or creating a new file
+    append = exist(varargin{1}, 'file') == 2;
+    output = [tempname '.pdf'];
+    try
+        % Ensure that the temp dir is writable (Javier Paredes 26/2/15)
+        fid = fopen(output,'w');
+        fwrite(fid,1);
+        fclose(fid);
+        delete(output);
+        isTempDirOk = true;
+    catch
+        % Temp dir is not writable, so use the output folder
+        [dummy,fname,fext] = fileparts(output); %#ok<ASGLU>
+        fpath = fileparts(varargin{1});
+        output = fullfile(fpath,[fname fext]);
+        isTempDirOk = false;
+    end
+    if ~append
+        output = varargin{1};
+        varargin = varargin(2:end);
+    end
+
+    % Create the command file
+    if isTempDirOk
+        cmdfile = [tempname '.txt'];
+    else
+        cmdfile = fullfile(fpath,[fname '.txt']);
+    end
+    prepareCmdFile(cmdfile, output, varargin{:});
+
+    % Call ghostscript
+    [status, errMsg] = ghostscript(['@"' cmdfile '"']);
+
+    % Check for ghostscript execution errors
+    if status && ~isempty(strfind(errMsg,'undefinedfile')) && ispc %#ok<STREMP>
+        % Fix issue #213: non-ASCII characters in folder names on Windows
+        for fileIdx = 2 : numel(varargin)
+            [fpath,fname,fext] = fileparts(varargin{fileIdx});
+            varargin{fileIdx} = fullfile(normalizePath(fpath),[fname fext]);
+        end
+        % Rerun ghostscript with the normalized folder names
+        prepareCmdFile(cmdfile, output, varargin{:});
+        [status, errMsg] = ghostscript(['@"' cmdfile '"']);
+    end
+
+    % Delete the command file
+    delete(cmdfile);
+
+    % Check for ghostscript execution errors
+    if status
+        errMsg = strrep(errMsg,'\','\\');  % Avoid an "invalid escape-char" warning
+        error('YMA:export_fig:append_pdf',errMsg);
+    end
+
+    % Rename the file if needed
+    if append
+        movefile(output, varargin{1}, 'f');
+    end
 end
-if ~append
-    output = varargin{1};
-    varargin = varargin(2:end);
+
+% Prepare a text file with ghostscript directives
+function prepareCmdFile(cmdfile, output, varargin)
+    fh = fopen(cmdfile, 'w');
+    fprintf(fh, '-q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dPDFSETTINGS=/prepress -sOutputFile="%s" -f', output);
+    fprintf(fh, ' "%s"', varargin{:});
+    fclose(fh);
 end
-% Create the command file
-if isTempDirOk
-    cmdfile = [tempname '.txt'];
-else
-    cmdfile = fullfile(fpath,[fname '.txt']);
+
+% Convert long/non-ASCII folder names into their short ASCII equivalents
+function pathStr = normalizePath(pathStr)
+    [fpath,fname,fext] = fileparts(pathStr);
+    if isempty(fpath) || strcmpi(fpath,pathStr), return, end
+    dirOutput = evalc(['system(''dir /X /AD "' pathStr '*"'')']);
+    shortName = strtrim(regexprep(dirOutput,{'.*> *',[fname fext '.*']},''));
+    if isempty(shortName)
+        shortName = [fname fext];
+    end
+    fpath = normalizePath(fpath);  %recursive until entire fpath is processed
+    pathStr = fullfile(fpath, shortName);
 end
-fh = fopen(cmdfile, 'w');
-fprintf(fh, '-q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dPDFSETTINGS=/prepress -sOutputFile="%s" -f', output);
-fprintf(fh, ' "%s"', varargin{:});
-fclose(fh);
-% Call ghostscript
-ghostscript(['@"' cmdfile '"']);
-% Delete the command file
-delete(cmdfile);
-% Rename the file if needed
-if append
-    movefile(output, varargin{1});
-end
-end
Index: /issm/trunk/externalpackages/export_fig/copyfig.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/copyfig.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/copyfig.m	(revision 24686)
@@ -14,9 +14,10 @@
 %    fh_new - The handle of the created figure.
 
-% Copyright (C) Oliver Woodford 2012
+% Copyright (C) Oliver Woodford 2012, Yair Altman 2015
 
 % 26/02/15: If temp dir is not writable, use the dest folder for temp
 %           destination files (Javier Paredes)
 % 15/04/15: Suppress warnings during copyobj (Dun Kirk comment on FEX page 2013-10-02)
+% 09/09/18: Fix issue #252: Workaround for cases where copyobj() fails for any reason
 
     % Set the default
@@ -25,10 +26,17 @@
     end
     % Is there a legend?
-    if isempty(findall(fh, 'Type', 'axes', 'Tag', 'legend'))
+    useCopyobj = isempty(findall(fh, 'Type', 'axes', 'Tag', 'legend'));
+    if useCopyobj
         % Safe to copy using copyobj
-        oldWarn = warning('off'); %#ok<WNOFF>  %Suppress warnings during copyobj (Dun Kirk comment on FEX page 2013-10-02)
-        fh = copyobj(fh, 0);
+        oldWarn = warning('off'); %Suppress warnings during copyobj (Dun Kirk comment on FEX page 2013-10-02)
+        try
+            fh = copyobj(fh, 0);
+        catch
+            % Fix issue #252: Workaround for cases where copyobj() fails for any reason
+            useCopyobj = false;  % if copyobj() croaks, use file save/load below
+        end
         warning(oldWarn);
-    else
+    end
+    if ~useCopyobj
         % copyobj will change the figure, so save and then load it instead
         tmp_nam = [tempname '.fig'];
Index: /issm/trunk/externalpackages/export_fig/eps2pdf.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/eps2pdf.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/eps2pdf.m	(revision 24686)
@@ -42,12 +42,9 @@
 % http://www.mathworks.com/matlabcentral/fileexchange/23629
 
-% Thank you to Fabio Viola for pointing out compression artifacts, leading
-% to the quality setting.
-% Thank you to Scott for pointing out the subsampling of very small images,
-% which was fixed for lossless compression settings.
-
-% 9/12/2011 Pass font path to ghostscript.
-% 26/02/15: If temp dir is not writable, use the dest folder for temp
-%           destination files (Javier Paredes)
+% Thank you Fabio Viola for pointing out compression artifacts, leading to the quality setting.
+% Thank you Scott for pointing out the subsampling of very small images, which was fixed for lossless compression settings.
+
+% 09/12/11: Pass font path to ghostscript
+% 26/02/15: If temp dir is not writable, use the dest folder for temp destination files (Javier Paredes)
 % 28/02/15: Enable users to specify optional ghostscript options (issue #36)
 % 01/03/15: Upon GS error, retry without the -sFONTPATH= option (this might solve
@@ -57,11 +54,16 @@
 % 22/02/16: Bug fix from latest release of this file (workaround for issue #41)
 % 20/03/17: Added informational message in case of GS croak (issue #186)
+% 16/01/18: Improved appending of multiple EPS files into single PDF (issue #233; thanks @shartjen)
+% 18/10/19: Workaround for GS 9.51+ .setpdfwrite removal problem (issue #285)
+% 18/10/19: Warn when ignoring GS fontpath or quality options; clarified error messages
 
     % Intialise the options string for ghostscript
     options = ['-q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dPDFSETTINGS=/prepress -sOutputFile="' dest '"'];
+
     % Set crop option
     if nargin < 3 || crop
         options = [options ' -dEPSCrop'];
     end
+
     % Set the font path
     fp = font_path();
@@ -69,21 +71,27 @@
         options = [options ' -sFONTPATH="' fp '"'];
     end
+
     % Set the grayscale option
     if nargin > 4 && gray
         options = [options ' -sColorConversionStrategy=Gray -dProcessColorModel=/DeviceGray'];
     end
+
     % Set the bitmap quality
+    qualityOptions = '';
     if nargin > 5 && ~isempty(quality)
-        options = [options ' -dAutoFilterColorImages=false -dAutoFilterGrayImages=false'];
+        qualityOptions = ' -dAutoFilterColorImages=false -dAutoFilterGrayImages=false';
         if quality > 100
-            options = [options ' -dColorImageFilter=/FlateEncode -dGrayImageFilter=/FlateEncode -c ".setpdfwrite << /ColorImageDownsampleThreshold 10 /GrayImageDownsampleThreshold 10 >> setdistillerparams"'];
+            qualityOptions = [qualityOptions ' -dColorImageFilter=/FlateEncode -dGrayImageFilter=/FlateEncode'];
+            qualityOptions = [qualityOptions ' -c ".setpdfwrite << /ColorImageDownsampleThreshold 10 /GrayImageDownsampleThreshold 10 >> setdistillerparams"'];
         else
-            options = [options ' -dColorImageFilter=/DCTEncode -dGrayImageFilter=/DCTEncode'];
+            qualityOptions = [qualityOptions ' -dColorImageFilter=/DCTEncode -dGrayImageFilter=/DCTEncode'];
             v = 1 + (quality < 80);
             quality = 1 - quality / 100;
             s = sprintf('<< /QFactor %.2f /Blend 1 /HSample [%d 1 1 %d] /VSample [%d 1 1 %d] >>', quality, v, v, v, v);
-            options = sprintf('%s -c ".setpdfwrite << /ColorImageDict %s /GrayImageDict %s >> setdistillerparams"', options, s, s);
-        end
-    end
+            qualityOptions = [qualityOptions ' -c ".setpdfwrite << /ColorImageDict ' s ' /GrayImageDict ' s ' >> setdistillerparams"'];
+        end
+        options = [options qualityOptions];
+    end
+
     % Enable users to specify optional ghostscript options (issue #36)
     if nargin > 6 && ~isempty(gs_options)
@@ -97,8 +105,10 @@
         options = [options gs_options];
     end
+
     % Check if the output file exists
     if nargin > 3 && append && exist(dest, 'file') == 2
         % File exists - append current figure to the end
-        tmp_nam = tempname;
+        tmp_nam = [tempname '.pdf'];
+        [fpath,fname,fext] = fileparts(tmp_nam);
         try
             % Ensure that the temp dir is writable (Javier Paredes 26/2/15)
@@ -109,29 +119,37 @@
         catch
             % Temp dir is not writable, so use the dest folder
-            [dummy,fname,fext] = fileparts(tmp_nam); %#ok<ASGLU>
             fpath = fileparts(dest);
             tmp_nam = fullfile(fpath,[fname fext]);
         end
-        % Copy the file
+        % Copy the existing (dest) pdf file to temporary folder
         copyfile(dest, tmp_nam);
-        % Add the output file names
-        options = [options ' -f "' tmp_nam '" "' source '"'];
+        % Produce an interim pdf of the source eps, rather than adding the eps directly (issue #233)
+        ghostscript([options ' -f "' source '"']);
+        [~,fname] = fileparts(tempname);
+        tmp_nam2 = fullfile(fpath,[fname fext]); % ensure using a writable folder (not necessarily tempdir)
+        copyfile(dest, tmp_nam2);
+        % Add the existing pdf and interim pdf as inputs to ghostscript
+        %options = [options ' -f "' tmp_nam '" "' source '"'];  % append the source eps to dest pdf
+        options = [options ' -f "' tmp_nam '" "' tmp_nam2 '"']; % append the interim pdf to dest pdf
         try
             % Convert to pdf using ghostscript
             [status, message] = ghostscript(options);
         catch me
-            % Delete the intermediate file
+            % Delete the intermediate files and rethrow the error
             delete(tmp_nam);
+            delete(tmp_nam2);
             rethrow(me);
         end
-        % Delete the intermediate file
+        % Delete the intermediate (temporary) files
         delete(tmp_nam);
+        delete(tmp_nam2);
     else
         % File doesn't exist or should be over-written
-        % Add the output file names
+        % Add the source eps file as input to ghostscript
         options = [options ' -f "' source '"'];
         % Convert to pdf using ghostscript
         [status, message] = ghostscript(options);
     end
+
     % Check for error
     if status
@@ -142,10 +160,24 @@
             options = regexprep(options, ' -sFONTPATH=[^ ]+ ',' ');
             status = ghostscript(options);
-            if ~status, return; end  % hurray! (no error)
-        end
+            if ~status % hurray! (no error)
+                warning('export_fig:GS:fontpath','Export_fig font option is ignored - not supported by your Ghostscript version')
+                return
+            end
+        end
+
+        % Retry without quality options (may solve problems with GS 9.51+, issue #285)
+        if ~isempty(qualityOptions)
+            options = strrep(orig_options, qualityOptions, '');
+            [status, message] = ghostscript(options);
+            if ~status % hurray! (no error)
+                warning('export_fig:GS:quality','Export_fig quality option is ignored - not supported by your Ghostscript version')
+                return
+            end
+        end
+
         % Report error
         if isempty(message)
-            error('Unable to generate pdf. Check destination directory is writable.');
-        elseif ~isempty(strfind(message,'/typecheck in /findfont'))
+            error('Unable to generate pdf. Ensure that the destination folder is writable.');
+        elseif ~isempty(strfind(message,'/typecheck in /findfont')) %#ok<STREMP>
             % Suggest a workaround for issue #41 (missing font path)
             font_name = strtrim(regexprep(message,'.*Operand stack:\s*(.*)\s*Execution.*','$1'));
@@ -158,5 +190,5 @@
             fprintf(2, '\nGhostscript error: perhaps %s is open by another application\n', dest);
             if ~isempty(gs_options)
-                fprintf(2, '  or maybe the%s option(s) are not accepted by your GS version\n', gs_options);
+                fprintf(2, '  or maybe your Ghostscript version does not accept the extra "%s" option(s) that you requested\n', gs_options);
             end
             fprintf(2, '  or maybe you have another gs executable in your system''s path\n');
Index: /issm/trunk/externalpackages/export_fig/export_fig.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/export_fig.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/export_fig.m	(revision 24686)
@@ -1,3 +1,3 @@
-function [imageData, alpha] = export_fig(varargin)
+function [imageData, alpha] = export_fig(varargin) %#ok<*STRCL1>
 %EXPORT_FIG  Exports figures in a publication-quality format
 %
@@ -25,5 +25,9 @@
 %   export_fig ... -update
 %   export_fig ... -nofontswap
+%   export_fig ... -font_space <char>
 %   export_fig ... -linecaps
+%   export_fig ... -noinvert
+%   export_fig ... -preserve_size
+%   export_fig ... -options <optionsStruct>
 %   export_fig(..., handle)
 %
@@ -38,11 +42,11 @@
 %   - Render images at native resolution (optional for bitmap formats)
 %   - Transparent background supported (pdf, eps, png, tif)
-%   - Semi-transparent patch objects supported (png & tif only)
-%   - RGB, CMYK or grayscale output (CMYK only with pdf, eps, tiff)
+%   - Semi-transparent patch objects supported (png, tif)
+%   - RGB, CMYK or grayscale output (CMYK only with pdf, eps, tif)
 %   - Variable image compression, including lossless (pdf, eps, jpg)
-%   - Optionally append to file (pdf, tiff)
-%   - Vector formats: pdf, eps
-%   - Bitmap formats: png, tiff, jpg, bmp, export to workspace
-%   - Rounded line-caps (optional; pdf & eps only)
+%   - Optional rounded line-caps (pdf, eps)
+%   - Optionally append to file (pdf, tif)
+%   - Vector formats: pdf, eps, svg
+%   - Bitmap formats: png, tif, jpg, bmp, export to workspace
 %
 % This function is especially suited to exporting figures for use in
@@ -58,13 +62,13 @@
 % background; only TIF & PNG formats support transparency of patch objects.
 %
-% The choice of renderer (opengl, zbuffer or painters) has a large impact
-% on the quality of output. The default value (opengl for bitmaps, painters
-% for vector formats) generally gives good results, but if you aren't
-% satisfied then try another renderer.  Notes: 1) For vector formats (EPS,
-% PDF), only painters generates vector graphics. 2) For bitmaps, only
-% opengl can render transparent patch objects correctly. 3) For bitmaps,
-% only painters will correctly scale line dash and dot lengths when
-% magnifying or anti-aliasing. 4) Fonts may be substitued with Courier when
-% using painters.
+% The choice of renderer (opengl/zbuffer/painters) has a large impact on the
+% output quality. The default value (opengl for bitmaps, painters for vector
+% formats) generally gives good results, but if you aren't satisfied
+% then try another renderer.  Notes:
+%   1) For vector formats (EPS,PDF), only painters generates vector graphics
+%   2) For bitmap formats, only opengl correctly renders transparent patches
+%   3) For bitmap formats, only painters correctly scales line dash and dot
+%      lengths when magnifying or anti-aliasing
+%   4) Fonts may be substituted with Courier when using painters
 %
 % When exporting to vector format (PDF & EPS) and bitmap format using the
@@ -72,29 +76,32 @@
 % on your system. You can download this from:
 %   http://www.ghostscript.com
-% When exporting to eps it additionally requires pdftops, from the Xpdf
-% suite of functions. You can download this from:
-%   http://www.foolabs.com/xpdf
+% When exporting to EPS it additionally requires pdftops, from the Xpdf
+% suite of functions. You can download this from: http://xpdfreader.com
+%
+% SVG output uses the fig2svg (https://github.com/kupiqu/fig2svg) or plot2svg
+% (https://github.com/jschwizer99/plot2svg) utilities, or Matlab's built-in
+% SVG export if neither of these utilities is available on Matlab's path.
+% Note: cropping/padding are not supported in export_fig's SVG output.
 %
 % Inputs:
 %   filename - string containing the name (optionally including full or
-%              relative path) of the file the figure is to be saved as. If
-%              a path is not specified, the figure is saved in the current
-%              directory. If no name and no output arguments are specified,
-%              the default name, 'export_fig_out', is used. If neither a
-%              file extension nor a format are specified, a ".png" is added
-%              and the figure saved in that format.
-%   -format1, -format2, etc. - strings containing the extensions of the
-%                              file formats the figure is to be saved as.
-%                              Valid options are: '-pdf', '-eps', '-png',
-%                              '-tif', '-jpg' and '-bmp'. All combinations
-%                              of formats are valid.
-%   -nocrop - option indicating that the borders of the output are not to
-%             be cropped.
+%             relative path) of the file the figure is to be saved as. If
+%             a path is not specified, the figure is saved in the current
+%             directory. If no name and no output arguments are specified,
+%             the default name, 'export_fig_out', is used. If neither a
+%             file extension nor a format are specified, a ".png" is added
+%             and the figure saved in that format.
+%   -<format> - string(s) containing the output file extension(s). Options:
+%             '-pdf', '-eps', '-svg', '-png', '-tif', '-jpg' and '-bmp'.
+%             Multiple formats can be specified, without restriction.
+%             For example: export_fig('-jpg', '-pdf', '-png', ...)
+%             Either '-tif','-tiff' can be specified, and either '-jpg','-jpeg'.
+%   -nocrop - option indicating that empty margins should not be cropped.
 %   -c[<val>,<val>,<val>,<val>] - option indicating crop amounts. Must be
 %             a 4-element vector of numeric values: [top,right,bottom,left]
 %             where NaN/Inf indicate auto-cropping, 0 means no cropping,
 %             and any other value mean cropping in pixel amounts.
-%   -transparent - option indicating that the figure background is to be
-%                  made transparent (png, pdf, tif and eps output only).
+%   -transparent - option indicating that the figure background is to be made
+%             transparent (PNG,PDF,TIF,EPS formats only). Implies -noinvert.
 %   -m<val> - option where val indicates the factor to magnify the
 %             on-screen figure pixel dimensions by when generating bitmap
@@ -116,20 +123,19 @@
 %             effects on image quality (disable with the -a1 option).
 %   -a1, -a2, -a3, -a4 - option indicating the amount of anti-aliasing to
-%                        use for bitmap outputs. '-a1' means no anti-
-%                        aliasing; '-a4' is the maximum amount (default).
+%             use for bitmap outputs. '-a1' means no anti-aliasing;
+%             '-a4' is the maximum amount (default).
 %   -<renderer> - option to force a particular renderer (painters, opengl or
-%                 zbuffer). Default value: opengl for bitmap formats or
-%                 figures with patches and/or transparent annotations;
-%                 painters for vector formats without patches/transparencies.
+%             zbuffer). Default value: opengl for bitmap formats or
+%             figures with patches and/or transparent annotations;
+%             painters for vector formats without patches/transparencies.
 %   -<colorspace> - option indicating which colorspace color figures should
-%                   be saved in: RGB (default), CMYK or gray. CMYK is only
-%                   supported in pdf, eps and tiff output.
-%   -q<val> - option to vary bitmap image quality (in pdf, eps and jpg
-%             files only).  Larger val, in the range 0-100, gives higher
-%             quality/lower compression. val > 100 gives lossless
-%             compression. Default: '-q95' for jpg, ghostscript prepress
-%             default for pdf & eps. Note: lossless compression can
-%             sometimes give a smaller file size than the default lossy
-%             compression, depending on the type of images.
+%             be saved in: RGB (default), CMYK or gray. Usage example: '-gray'.
+%             Note: CMYK is only supported in PDF, EPS and TIF formats.
+%   -q<val> - option to vary bitmap image quality (PDF, EPS, JPG formats only).
+%             A larger val, in the range 0-100, produces higher quality and
+%             lower compression. val > 100 results in lossless compression.
+%             Default: '-q95' for JPG, ghostscript prepress default for PDF,EPS.
+%             Note: lossless compression can sometimes give a smaller file size
+%             than the default lossy compression, depending on the image type.
 %   -p<val> - option to pad a border of width val to exported files, where
 %             val is either a relative size with respect to cropped image
@@ -139,13 +145,13 @@
 %             If used, the -nocrop flag will be ignored, i.e. the image will
 %             always be cropped and then padded. Default: 0 (i.e. no padding).
-%   -append - option indicating that if the file (pdfs only) already
-%             exists, the figure is to be appended as a new page, instead
-%             of being overwritten (default).
+%   -append - option indicating that if the file already exists the figure is to
+%             be appended as a new page, instead of being overwritten (default).
+%             PDF & TIF output formats only.
 %   -bookmark - option to indicate that a bookmark with the name of the
-%               figure is to be created in the output file (pdf only).
+%             figure is to be created in the output file (PDF format only).
 %   -clipboard - option to save output as an image on the system clipboard.
-%                Note: background transparency is not preserved in clipboard
+%             Note: background transparency is not preserved in clipboard
 %   -d<gs_option> - option to indicate a ghostscript setting. For example,
-%                   -dMaxBitmap=0 or -dNoOutputFonts (Ghostscript 9.15+).
+%             -dMaxBitmap=0 or -dNoOutputFonts (Ghostscript 9.15+).
 %   -depsc -  option to use EPS level-3 rather than the default level-2 print
 %             device. This solves some bugs with Matlab's default -depsc2 device
@@ -155,8 +161,19 @@
 %             done in vector formats (only): 11 standard Matlab fonts are
 %             replaced by the original figure fonts. This option prevents this.
+%   -font_space <char> - option to set a spacer character for font-names that
+%             contain spaces, used by EPS/PDF. Default: ''
 %   -linecaps - option to create rounded line-caps (vector formats only).
+%   -noinvert - option to avoid setting figure's InvertHardcopy property to
+%             'off' during output (this solves some problems of empty outputs).
+%   -preserve_size - option to preserve the figure's PaperSize property in output
+%             file (PDF/EPS formats only; default is to not preserve it).
+%   -options <optionsStruct> - format-specific parameters as defined in Matlab's
+%             documentation of the imwrite function, contained in a struct under
+%             the format name. For example to specify the JPG Comment parameter,
+%             pass a struct such as this: options.JPG.Comment='abc'. Similarly,
+%             options.PNG.BitDepth=4. Valid only for PNG,TIF,JPG output formats.
 %   handle -  The handle of the figure, axes or uipanels (can be an array of
-%             handles, but the objects must be in the same figure) to be
-%             saved. Default: gcf.
+%             handles, but the objects must be in the same figure) which is
+%             to be saved. Default: gcf (handle of current figure).
 %
 % Outputs:
@@ -248,4 +265,30 @@
 % 22/03/17: Fixed issue #187: only set manual ticks when no exponent is present
 % 09/04/17: Added -linecaps option (idea by Baron Finer, issue #192)
+% 15/09/17: Fixed issue #205: incorrect tick-labels when Ticks number don't match the TickLabels number
+% 15/09/17: Fixed issue #210: initialize alpha map to ones instead of zeros when -transparent is not used
+% 18/09/17: Added -font_space option to replace font-name spaces in EPS/PDF (workaround for issue #194)
+% 18/09/17: Added -noinvert option to solve some export problems with some graphic cards (workaround for issue #197)
+% 08/11/17: Fixed issue #220: axes exponent is removed in HG1 when TickMode is 'manual' (internal Matlab bug)
+% 08/11/17: Fixed issue #221: alert if the requested folder does not exist
+% 19/11/17: Workaround for issue #207: alert when trying to use transparent bgcolor with -opengl
+% 29/11/17: Workaround for issue #206: warn if exporting PDF/EPS for a figure that contains an image
+% 11/12/17: Fixed issue #230: use OpenGL renderer when exported image contains transparency (also see issue #206)
+% 30/01/18: Updated SVG message to point to https://github.com/kupiqu/plot2svg and display user-selected filename if available
+% 27/02/18: Fixed issue #236: axes exponent cropped from output if on right-hand axes
+% 29/05/18: Fixed issue #245: process "string" inputs just like 'char' inputs
+% 13/08/18: Fixed issue #249: correct black axes color to off-black to avoid extra cropping with -transparent
+% 27/08/18: Added a possible file-open reason in EPS/PDF write-error message (suggested by "craq" on FEX page)
+% 22/09/18: Xpdf website changed to xpdfreader.com
+% 23/09/18: Fixed issue #243: only set non-bold font (workaround for issue #69) in R2015b or earlier; warn if changing font
+% 23/09/18: Workaround for issue #241: don't use -r864 in EPS/PDF outputs when -native is requested (solves black lines problem)
+% 18/11/18: Issue #261: Added informative alert when trying to export a uifigure (which is not currently supported)
+% 13/12/18: Issue #261: Fixed last commit for cases of specifying axes/panel handle as input, rather than a figure handle
+% 13/01/19: Issue #72: Added basic SVG output support
+% 04/02/19: Workaround for issues #207 and #267: -transparent implies -noinvert
+% 08/03/19: Issue #269: Added ability to specify format-specific options for PNG,TIF,JPG outputs; fixed help section
+% 21/03/19: Fixed the workaround for issues #207 and #267 from 4/2/19 (-transparent now does *NOT* imply -noinvert; -transparent output should now be ok in all formats)
+% 12/06/19: Issue #277: Enabled preservation of figure's PaperSize in output PDF/EPS file
+% 06/08/19: Remove warning message about obsolete JavaFrame in R2019b
+% 30/10/19: Fixed issue #261: added support for exporting uifigures and uiaxes (thanks to idea by @MarvinILA)
 %}
 
@@ -266,7 +309,63 @@
     % Ensure that we have a figure handle
     if isequal(fig,-1)
-        return;  % silent bail-out
+        return  % silent bail-out
     elseif isempty(fig)
         error('No figure found');
+    else
+        oldWarn = warning('off','MATLAB:HandleGraphics:ObsoletedProperty:JavaFrame');
+        warning off MATLAB:ui:javaframe:PropertyToBeRemoved
+        uifig = handle(ancestor(fig,'figure'));
+        try jf = get(uifig,'JavaFrame'); catch, jf=1; end
+        warning(oldWarn);
+        if isempty(jf)  % this is a uifigure
+            %error('Figures created using the uifigure command or App Designer are not supported by export_fig. See <a href="https://github.com/altmany/export_fig/issues/261">issue #261</a> for details.');
+            if numel(fig) > 1
+                error('export_fig:uifigure:multipleHandles', 'export_fig only supports exporting a single uifigure handle at a time; array of handles is not currently supported.')
+            elseif ~any(strcmpi(fig.Type,{'figure','axes'}))
+                error('export_fig:uifigure:notFigureOrAxes', 'export_fig only supports exporting a uifigure or uiaxes handle; other handles of a uifigure are not currently supported.')
+            end
+            % fig is either a uifigure or uiaxes handle
+            isUiaxes = strcmpi(fig.Type,'axes');
+            if isUiaxes
+                % Label the specified axes so that we can find it in the legacy figure
+                oldUserData = fig.UserData;
+                tempStr = tempname;
+                fig.UserData = tempStr;
+            end
+            try
+                % Create an invisible legacy figure at the same position/size as the uifigure
+                hNewFig = figure('Units',uifig.Units, 'Position',uifig.Position, 'MenuBar','none', 'ToolBar','none', 'Visible','off');
+                % Copy the uifigure contents onto the new invisible legacy figure
+                try
+                    hChildren = allchild(uifig); %=uifig.Children;
+                    copyobj(hChildren,hNewFig);
+                catch
+                    warning('export_fig:uifigure:controls', 'Some uifigure controls cannot be exported by export_fig and will not appear in the generated output.');
+                end
+                try fig.UserData = oldUserData; catch, end  % restore axes UserData, if modified above
+                % Replace the uihandle in the input args with the legacy handle
+                if isUiaxes  % uiaxes
+                    % Locate the corresponding axes handle in the new legacy figure
+                    hAxes = findall(hNewFig,'type','axes','UserData',tempStr);
+                    if isempty(hAxes) % should never happen, check just in case
+                        hNewHandle = hNewFig;  % export the figure instead of the axes
+                    else
+                        hNewHandle = hAxes;  % new axes handle found: use it instead of the uiaxes
+                    end
+                else  % uifigure
+                    hNewHandle = hNewFig;
+                end
+                varargin(cellfun(@(c)isequal(c,fig),varargin)) = {hNewHandle};
+                % Rerun export_fig on the legacy figure (with the replaced handle)
+                [imageData, alpha] = export_fig(varargin{:});
+                % Delete the temp legacy figure and bail out
+                try delete(hNewFig); catch, end
+                return
+            catch err
+                % Clean up the temp legacy figure and report the error
+                try delete(hNewFig); catch, end
+                rethrow(err)
+            end
+        end
     end
 
@@ -337,7 +436,14 @@
             % Set the FontWeight of axes labels/titles to 'normal'
             % Fix issue #69: set non-bold font only if the string contains symbols (\beta etc.)
-            texLabels = findall(fig, 'type','text', 'FontWeight','bold');
-            symbolIdx = ~cellfun('isempty',strfind({texLabels.String},'\'));
-            set(texLabels(symbolIdx), 'FontWeight','normal');
+            % Issue #243: only set non-bold font (workaround for issue #69) in R2015b or earlier
+            try isPreR2016a = verLessThan('matlab','8.7'); catch, isPreR2016a = true; end
+            if isPreR2016a
+                texLabels = findall(fig, 'type','text', 'FontWeight','bold');
+                symbolIdx = ~cellfun('isempty',strfind({texLabels.String},'\'));
+                if ~isempty(symbolIdx)
+                    set(texLabels(symbolIdx), 'FontWeight','normal');
+                    warning('export_fig:BoldTexLabels', 'Bold labels with Tex symbols converted into non-bold in export_fig (fix for issue #69)');
+                end
+            end
         end
     catch
@@ -365,5 +471,8 @@
 
     % Set to print exactly what is there
-    set(fig, 'InvertHardcopy', 'off');
+    if options.invert_hardcopy
+        try set(fig, 'InvertHardcopy', 'off'); catch, end  % fail silently in uifigures
+    end
+
     % Set the renderer
     switch options.renderer
@@ -377,4 +486,6 @@
             renderer = '-opengl'; % Default for bitmaps
     end
+
+    hImages = findall(fig,'type','image');
 
     % Handle transparent patches
@@ -390,4 +501,13 @@
         elseif ~options.png && ~options.tif  % issue #168
             warning('export_fig:transparency', '%s\nTo export the transparency correctly, try using the ScreenCapture utility on the Matlab File Exchange: http://bit.ly/1QFrBip', msg);
+        end
+    elseif ~isempty(hImages)
+        % Fix for issue #230: use OpenGL renderer when exported image contains transparency
+        for idx = 1 : numel(hImages)
+            cdata = get(hImages(idx),'CData');
+            if any(isnan(cdata(:)))
+                hasTransparency = true;
+                break
+            end
         end
     end
@@ -428,4 +548,9 @@
                 set(hCB(yCol==0), 'YColor', [0 0 0]);
                 set(hCB(xCol==0), 'XColor', [0 0 0]);
+                % Correct black axes color to off-black (issue #249)
+                hAxes = findall(fig, 'Type','axes');
+                hXs = fixBlackAxle(hAxes, 'XColor');
+                hYs = fixBlackAxle(hAxes, 'YColor');
+                hZs = fixBlackAxle(hAxes, 'ZColor');
 
                 % The following code might cause out-of-memory errors
@@ -445,4 +570,8 @@
                 set(hCB(yCol==3), 'YColor', [1 1 1]);
                 set(hCB(xCol==3), 'XColor', [1 1 1]);
+                % Revert the black axes colors
+                set(hXs, 'XColor', [0,0,0]);
+                set(hYs, 'YColor', [0,0,0]);
+                set(hZs, 'ZColor', [0,0,0]);
 
                 % The following code might cause out-of-memory errors
@@ -487,5 +616,12 @@
                     res = options.magnify * get(0, 'ScreenPixelsPerInch') / 25.4e-3;
                     % Save the png
-                    imwrite(A, [options.name '.png'], 'Alpha', double(alpha), 'ResolutionUnit', 'meter', 'XResolution', res, 'YResolution', res);
+                    [format_options, bitDepth] = getFormatOptions(options, 'png');  %Issue #269
+                    if ~isempty(bitDepth) && bitDepth < 16 && size(A,3) == 3
+                        % BitDepth specification requires using a color-map
+                        [A, map] = rgb2ind(A, 256);
+                        imwrite(A, map, [options.name '.png'], 'Alpha',double(alpha), 'ResolutionUnit','meter', 'XResolution',res, 'YResolution',res, format_options{:});
+                    else
+                        imwrite(A, [options.name '.png'], 'Alpha',double(alpha), 'ResolutionUnit','meter', 'XResolution',res, 'YResolution',res, format_options{:});
+                    end
                     % Clear the png bit
                     options.png = false;
@@ -544,5 +680,5 @@
                 if options.alpha
                     imageData = A;
-                    alpha = zeros(size(A, 1), size(A, 2), 'single');
+                    alpha = ones(size(A, 1), size(A, 2), 'single');
                 end
             end
@@ -550,5 +686,12 @@
             if options.png
                 res = options.magnify * get(0, 'ScreenPixelsPerInch') / 25.4e-3;
-                imwrite(A, [options.name '.png'], 'ResolutionUnit', 'meter', 'XResolution', res, 'YResolution', res);
+                [format_options, bitDepth] = getFormatOptions(options, 'png');  %Issue #269
+                if ~isempty(bitDepth) && bitDepth < 16 && size(A,3) == 3
+                    % BitDepth specification requires using a color-map
+                    [A, map] = rgb2ind(A, 256);
+                    imwrite(A, map, [options.name '.png'], 'ResolutionUnit','meter', 'XResolution',res, 'YResolution',res, format_options{:});
+                else
+                    imwrite(A, [options.name '.png'], 'ResolutionUnit','meter', 'XResolution',res, 'YResolution',res, format_options{:});
+                end
             end
             if options.bmp
@@ -561,8 +704,9 @@
                     quality = 95;
                 end
+                format_options = getFormatOptions(options, 'jpg');  %Issue #269
                 if quality > 100
-                    imwrite(A, [options.name '.jpg'], 'Mode', 'lossless');
+                    imwrite(A, [options.name '.jpg'], 'Mode','lossless', format_options{:});
                 else
-                    imwrite(A, [options.name '.jpg'], 'Quality', quality);
+                    imwrite(A, [options.name '.jpg'], 'Quality',quality, format_options{:});
                 end
             end
@@ -580,5 +724,6 @@
                 end
                 append_mode = {'overwrite', 'append'};
-                imwrite(A, [options.name '.tif'], 'Resolution', options.magnify*get(0, 'ScreenPixelsPerInch'), 'WriteMode', append_mode{options.append+1});
+                format_options = getFormatOptions(options, 'tif');  %Issue #269
+                imwrite(A, [options.name '.tif'], 'Resolution',options.magnify*get(0,'ScreenPixelsPerInch'), 'WriteMode',append_mode{options.append+1}, format_options{:});
             end
         end
@@ -590,5 +735,5 @@
                 if hasTransparency || hasPatches
                     % This is *MUCH* slower, but more accurate for patches and transparent annotations (issue #39)
-                    renderer = '-painters'; %ISSM fix
+                    renderer = '-opengl';
                 else
                     renderer = '-painters';
@@ -624,26 +769,55 @@
             end
             % Generate the options for print
-            p2eArgs = {renderer, sprintf('-r%d', options.resolution)};
+            printArgs = {renderer};
+            if ~isempty(options.resolution)  % issue #241
+                printArgs{end+1} = sprintf('-r%d', options.resolution);
+            end
             if options.colourspace == 1  % CMYK
                 % Issue #33: due to internal bugs in Matlab's print() function, we can't use its -cmyk option
-                %p2eArgs{end+1} = '-cmyk';
+                %printArgs{end+1} = '-cmyk';
             end
             if ~options.crop
                 % Issue #56: due to internal bugs in Matlab's print() function, we can't use its internal cropping mechanism,
                 % therefore we always use '-loose' (in print2eps.m) and do our own cropping (in crop_borders)
-                %p2eArgs{end+1} = '-loose';
+                %printArgs{end+1} = '-loose';
             end
             if any(strcmpi(varargin,'-depsc'))
                 % Issue #45: lines in image subplots are exported in invalid color.
                 % The workaround is to use the -depsc parameter instead of the default -depsc2
-                p2eArgs{end+1} = '-depsc';
+                printArgs{end+1} = '-depsc';
             end
             try
+                % Remove background if requested (issue #207)
+                originalBgColor = get(fig, 'Color');
+                [hXs, hYs, hZs] = deal([]);
+                if options.transparent %&& ~isequal(get(fig, 'Color'), 'none')
+                    if options.renderer == 1  % OpenGL
+                        warning('export_fig:openglTransparentBG', '-opengl sometimes fails to produce transparent backgrounds; in such a case, try to use -painters instead');
+                    end
+
+                    % Fix for issue #207, #267 (corrected)
+                    set(fig,'Color','none');
+
+                    % Correct black axes color to off-black (issue #249)
+                    hAxes = findall(fig, 'Type','axes');
+                    hXs = fixBlackAxle(hAxes, 'XColor');
+                    hYs = fixBlackAxle(hAxes, 'YColor');
+                    hZs = fixBlackAxle(hAxes, 'ZColor');
+                end
                 % Generate an eps
-                print2eps(tmp_nam, fig, options, p2eArgs{:});
+                print2eps(tmp_nam, fig, options, printArgs{:});
+                % {
                 % Remove the background, if desired
-                if options.transparent && ~isequal(get(fig, 'Color'), 'none')
+                if options.transparent %&& ~isequal(get(fig, 'Color'), 'none')
                     eps_remove_background(tmp_nam, 1 + using_hg2(fig));
-                end
+
+                    % Revert the black axes colors
+                    set(hXs, 'XColor', [0,0,0]);
+                    set(hYs, 'YColor', [0,0,0]);
+                    set(hZs, 'ZColor', [0,0,0]);
+                end
+                %}
+                % Restore the figure's previous background color (if modified)
+                try set(fig,'Color',originalBgColor); drawnow; catch, end
                 % Fix colorspace to CMYK, if requested (workaround for issue #33)
                 if options.colourspace == 1  % CMYK
@@ -671,5 +845,6 @@
                     % Alert in case of error creating output PDF/EPS file (issue #179)
                     if exist(pdf_nam_tmp, 'file')
-                        error(['Could not create ' pdf_nam ' - perhaps the folder does not exist, or you do not have write permissions']);
+                        errMsg = ['Could not create ' pdf_nam ' - perhaps the folder does not exist, or you do not have write permissions, or the file is open in another application'];
+                        error(errMsg);
                     else
                         error('Could not generate the intermediary EPS file.');
@@ -677,6 +852,9 @@
                 end
             catch ex
+                % Restore the figure's previous background color (in case it was not already restored)
+                try set(fig,'Color',originalBgColor); drawnow; catch, end
                 % Delete the eps
                 delete(tmp_nam);
+                % Rethrow the EPS/PDF-generation error
                 rethrow(ex);
             end
@@ -718,4 +896,54 @@
                 end
             end
+            % Issue #206: warn if the figure contains an image
+            if ~isempty(hImages) && strcmpi(renderer,'-opengl')  % see addendum to issue #206
+                warnMsg = ['exporting images to PDF/EPS may result in blurry images on some viewers. ' ...
+                           'If so, try to change viewer, or increase the image''s CData resolution, or use -opengl renderer, or export via the print function. ' ...
+                           'See <a href="matlab:web(''https://github.com/altmany/export_fig/issues/206'',''-browser'');">issue #206</a> for details.'];
+                warning('export_fig:pdf_eps:blurry_image', warnMsg);
+            end
+        end
+
+        % SVG format
+        if options.svg
+            oldUnits = get(fig,'Units');
+            filename = [options.name '.svg'];
+            % Adapted from Dan Joshea's https://github.com/djoshea/matlab-save-figure :
+            try %if verLessThan('matlab', '8.4')
+                % Try using the fig2svg/plot2svg utilities
+                try
+                    fig2svg(filename, fig);  %https://github.com/kupiqu/fig2svg
+                catch
+                    plot2svg(filename, fig); %https://github.com/jschwizer99/plot2svg
+                    warning('export_fig:SVG:plot2svg', 'export_fig used the plot2svg utility for SVG output. Better results may be gotten via the fig2svg utility (https://github.com/kupiqu/fig2svg).');
+                end
+            catch %else  % (neither fig2svg nor plot2svg are available)
+                % Try Matlab's built-in svg engine (from Batik Graphics2D for java)
+                try
+                    set(fig,'Units','pixels');   % All data in the svg-file is saved in pixels
+                    printArgs = {renderer};
+                    if ~isempty(options.resolution)
+                        printArgs{end+1} = sprintf('-r%d', options.resolution);
+                    end
+                    print(fig, '-dsvg', printArgs{:}, filename);
+                    warning('export_fig:SVG:print', 'export_fig used Matlab''s built-in SVG output engine. Better results may be gotten via the fig2svg utility (https://github.com/kupiqu/fig2svg).');
+                catch err  % built-in print() failed - maybe an old Matlab release (no -dsvg)
+                    set(fig,'Units',oldUnits);
+                    filename = strrep(filename,'export_fig_out','filename');
+                    msg = ['SVG output is not supported for your figure: ' err.message '\n' ...
+                        'Try one of the following alternatives:\n' ...
+                        '  1. saveas(gcf,''' filename ''')\n' ...
+                        '  2. fig2svg utility: https://github.com/kupiqu/fig2svg\n' ...  % Note: replaced defunct https://github.com/jschwizer99/plot2svg with up-to-date fork on https://github.com/kupiqu/fig2svg
+                        '  3. export_fig to EPS/PDF, then convert to SVG using non-Matlab tools\n'];
+                    error(sprintf(msg)); %#ok<SPERR>
+                end
+            end
+            % SVG output was successful if we reached this point
+            % Restore original figure units
+            set(fig,'Units',oldUnits);
+            % Add warning about unsupported export_fig options with SVG output
+            if any(~isnan(options.crop_amounts)) || any(options.bb_padding)
+                warning('export_fig:SVG:options', 'export_fig''s SVG output does not [currently] support cropping/padding.');
+            end
         end
 
@@ -726,5 +954,5 @@
         else
             % Reset the hardcopy mode
-            set(fig, 'InvertHardcopy', old_mode);
+            try set(fig, 'InvertHardcopy', old_mode); catch, end  % fail silently in uifigures
             % Reset the axes limit and tick modes
             for a = 1:numel(Hlims)
@@ -775,5 +1003,5 @@
                 error(javachk('awt', 'export_fig -clipboard output'));
             catch
-                warning('export_fig -clipboard output failed: requires Java to work');
+                warning('export_fig:clipboardJava', 'export_fig -clipboard output failed: requires Java to work');
                 return;
             end
@@ -821,5 +1049,5 @@
                 cb.setContents(imSelection, []);
             catch
-                warning('export_fig -clipboard output failed: %s', lasterr); %#ok<LERR>
+                warning('export_fig:clipboardFailed', 'export_fig -clipboard output failed: %s', lasterr); %#ok<LERR>
             end
         end
@@ -835,12 +1063,14 @@
             fprintf(2, 'Please ensure:\n');
             fprintf(2, '  that you are using the <a href="https://github.com/altmany/export_fig/archive/master.zip">latest version</a> of export_fig\n');
-            if ismac
-                fprintf(2, '  and that you have <a href="http://pages.uoregon.edu/koch">Ghostscript</a> installed\n');
-            else
-                fprintf(2, '  and that you have <a href="http://www.ghostscript.com">Ghostscript</a> installed\n');
+            if isvector(options)
+                if ismac
+                    fprintf(2, '  and that you have <a href="http://pages.uoregon.edu/koch">Ghostscript</a> installed\n');
+                else
+                    fprintf(2, '  and that you have <a href="http://www.ghostscript.com">Ghostscript</a> installed\n');
+                end
             end
             try
                 if options.eps
-                    fprintf(2, '  and that you have <a href="http://www.foolabs.com/xpdf">pdftops</a> installed\n');
+                    fprintf(2, '  and that you have <a href="http://xpdfreader.com/download.html">pdftops</a> installed\n');
                 end
             catch
@@ -866,35 +1096,43 @@
     % Default options used by export_fig
     options = struct(...
-        'name',         'export_fig_out', ...
-        'crop',         true, ...
-        'crop_amounts', nan(1,4), ...  % auto-crop all 4 image sides
-        'transparent',  false, ...
-        'renderer',     0, ...         % 0: default, 1: OpenGL, 2: ZBuffer, 3: Painters
-        'pdf',          false, ...
-        'eps',          false, ...
-        'png',          false, ...
-        'tif',          false, ...
-        'jpg',          false, ...
-        'bmp',          false, ...
-        'clipboard',    false, ...
-        'colourspace',  0, ...         % 0: RGB/gray, 1: CMYK, 2: gray
-        'append',       false, ...
-        'im',           false, ...
-        'alpha',        false, ...
-        'aa_factor',    0, ...
-        'bb_padding',   0, ...
-        'magnify',      [], ...
-        'resolution',   [], ...
-        'bookmark',     false, ...
-        'closeFig',     false, ...
-        'quality',      [], ...
-        'update',       false, ...
-        'fontswap',     true, ...
-        'linecaps',     false, ...
-        'gs_options',   {{}});
+        'name',            'export_fig_out', ...
+        'crop',            true, ...
+        'crop_amounts',    nan(1,4), ...  % auto-crop all 4 image sides
+        'transparent',     false, ...
+        'renderer',        0, ...         % 0: default, 1: OpenGL, 2: ZBuffer, 3: Painters
+        'pdf',             false, ...
+        'eps',             false, ...
+        'svg',             false, ...
+        'png',             false, ...
+        'tif',             false, ...
+        'jpg',             false, ...
+        'bmp',             false, ...
+        'clipboard',       false, ...
+        'colourspace',     0, ...         % 0: RGB/gray, 1: CMYK, 2: gray
+        'append',          false, ...
+        'im',              false, ...
+        'alpha',           false, ...
+        'aa_factor',       0, ...
+        'bb_padding',      0, ...
+        'magnify',         [], ...
+        'resolution',      [], ...
+        'bookmark',        false, ...
+        'closeFig',        false, ...
+        'quality',         [], ...
+        'update',          false, ...
+        'fontswap',        true, ...
+        'font_space',      '', ...
+        'linecaps',        false, ...
+        'invert_hardcopy', true, ...
+        'format_options',  struct, ...
+        'preserve_size',   false, ...
+        'gs_options',      {{}});
 end
 
 function [fig, options] = parse_args(nout, fig, varargin)
     % Parse the input arguments
+
+    % Convert strings => chars
+    varargin = cellfun(@str2char,varargin,'un',false);
 
     % Set the defaults
@@ -931,4 +1169,6 @@
                     case 'eps'
                         options.eps = true;
+                    case 'svg'
+                        options.svg = true;
                     case 'png'
                         options.png = true;
@@ -957,10 +1197,4 @@
                         options.im = true;
                         options.alpha = true;
-                    case 'svg'
-                        msg = ['SVG output is not supported by export_fig. Use one of the following alternatives:\n' ...
-                               '  1. saveas(gcf,''filename.svg'')\n' ...
-                               '  2. plot2svg utility: http://github.com/jschwizer99/plot2svg\n' ...
-                               '  3. export_fig to EPS/PDF, then convert to SVG using generic (non-Matlab) tools\n'];
-                        error(sprintf(msg)); %#ok<SPERR>
                     case 'update'
                         % Download the latest version of export_fig into the export_fig folder
@@ -969,5 +1203,5 @@
                             folderName = fileparts(which(mfilename('fullpath')));
                             targetFileName = fullfile(folderName, datestr(now,'yyyy-mm-dd.zip'));
-                            urlwrite(zipFileName,targetFileName);
+                            urlwrite(zipFileName,targetFileName); %#ok<URLWR>
                         catch
                             error('Could not download %s into %s\n',zipFileName,targetFileName);
@@ -982,6 +1216,26 @@
                     case 'nofontswap'
                         options.fontswap = false;
+                    case 'font_space'
+                        options.font_space = varargin{a+1};
+                        skipNext = true;
                     case 'linecaps'
                         options.linecaps = true;
+                    case 'noinvert'
+                        options.invert_hardcopy = false;
+                    case 'preserve_size'
+                        options.preserve_size = true;
+                    case 'options'
+                        % Issue #269: format-specific options
+                        inputOptions = varargin{a+1};
+                        %options.format_options  = inputOptions;
+                        if isempty(inputOptions), continue, end
+                        formats = fieldnames(inputOptions(1));
+                        for idx = 1 : numel(formats)
+                            optionsStruct = inputOptions.(formats{idx});
+                            %optionsCells = [fieldnames(optionsStruct) struct2cell(optionsStruct)]';
+                            formatName = regexprep(lower(formats{idx}),{'tiff','jpeg'},{'tif','jpg'});
+                            options.format_options.(formatName) = optionsStruct; %=optionsCells(:)';
+                        end
+                        skipNext = true;
                     otherwise
                         try
@@ -1044,4 +1298,6 @@
                 [p, options.name, ext] = fileparts(varargin{a});
                 if ~isempty(p)
+                    % Issue #221: alert if the requested folder does not exist
+                    if ~exist(p,'dir'),  error(['Folder ' p ' does not exist!']);  end
                     options.name = [p filesep options.name];
                 end
@@ -1072,9 +1328,5 @@
                         end
                     case '.svg'
-                        msg = ['SVG output is not supported by export_fig. Use one of the following alternatives:\n' ...
-                               '  1. saveas(gcf,''filename.svg'')\n' ...
-                               '  2. plot2svg utility: http://github.com/jschwizer99/plot2svg\n' ...
-                               '  3. export_fig to EPS/PDF, then convert to SVG using generic (non-Matlab) tools\n'];
-                        error(sprintf(msg)); %#ok<SPERR>
+                        options.svg = true;
                     otherwise
                         options.name = varargin{a};
@@ -1127,45 +1379,56 @@
     % If requested, set the resolution to the native vertical resolution of the
     % first suitable image found
-    if native && isbitmap(options)
-        % Find a suitable image
-        list = findall(fig, 'Type','image', 'Tag','export_fig_native');
-        if isempty(list)
-            list = findall(fig, 'Type','image', 'Visible','on');
-        end
-        for hIm = list(:)'
-            % Check height is >= 2
-            height = size(get(hIm, 'CData'), 1);
-            if height < 2
-                continue
-            end
-            % Account for the image filling only part of the axes, or vice versa
-            yl = get(hIm, 'YData');
-            if isscalar(yl)
-                yl = [yl(1)-0.5 yl(1)+height+0.5];
-            else
-                yl = [min(yl), max(yl)];  % fix issue #151 (case of yl containing more than 2 elements)
-                if ~diff(yl)
+    if native
+        if isbitmap(options)
+            % Find a suitable image
+            list = findall(fig, 'Type','image', 'Tag','export_fig_native');
+            if isempty(list)
+                list = findall(fig, 'Type','image', 'Visible','on');
+            end
+            for hIm = list(:)'
+                % Check height is >= 2
+                height = size(get(hIm, 'CData'), 1);
+                if height < 2
                     continue
                 end
-                yl = yl + [-0.5 0.5] * (diff(yl) / (height - 1));
-            end
-            hAx = get(hIm, 'Parent');
-            yl2 = get(hAx, 'YLim');
-            % Find the pixel height of the axes
-            oldUnits = get(hAx, 'Units');
-            set(hAx, 'Units', 'pixels');
-            pos = get(hAx, 'Position');
-            set(hAx, 'Units', oldUnits);
-            if ~pos(4)
-                continue
-            end
-            % Found a suitable image
-            % Account for stretch-to-fill being disabled
-            pbar = get(hAx, 'PlotBoxAspectRatio');
-            pos = min(pos(4), pbar(2)*pos(3)/pbar(1));
-            % Set the magnification to give native resolution
-            options.magnify = abs((height * diff(yl2)) / (pos * diff(yl)));  % magnification must never be negative: issue #103
-            break
-        end
+                % Account for the image filling only part of the axes, or vice versa
+                yl = get(hIm, 'YData');
+                if isscalar(yl)
+                    yl = [yl(1)-0.5 yl(1)+height+0.5];
+                else
+                    yl = [min(yl), max(yl)];  % fix issue #151 (case of yl containing more than 2 elements)
+                    if ~diff(yl)
+                        continue
+                    end
+                    yl = yl + [-0.5 0.5] * (diff(yl) / (height - 1));
+                end
+                hAx = get(hIm, 'Parent');
+                yl2 = get(hAx, 'YLim');
+                % Find the pixel height of the axes
+                oldUnits = get(hAx, 'Units');
+                set(hAx, 'Units', 'pixels');
+                pos = get(hAx, 'Position');
+                set(hAx, 'Units', oldUnits);
+                if ~pos(4)
+                    continue
+                end
+                % Found a suitable image
+                % Account for stretch-to-fill being disabled
+                pbar = get(hAx, 'PlotBoxAspectRatio');
+                pos = min(pos(4), pbar(2)*pos(3)/pbar(1));
+                % Set the magnification to give native resolution
+                options.magnify = abs((height * diff(yl2)) / (pos * diff(yl)));  % magnification must never be negative: issue #103
+                break
+            end
+        elseif options.resolution == 864  % don't use -r864 in vector mode if user asked for -native
+            options.resolution = []; % issue #241 (internal Matlab bug produces black lines with -r864)
+        end
+    end
+end
+
+% Convert a possible string => char (issue #245)
+function value = str2char(value)
+    if isa(value,'string')
+        value = char(value);
     end
 end
@@ -1304,9 +1567,22 @@
             hAxes = Hlims(idx(idx2));
             props = {[ax 'TickMode'],'manual', [ax 'TickLabelMode'],'manual'};
-            if isempty(strtrim(hAxes.([ax 'Ruler']).SecondaryLabel.String))
-                set(hAxes, props{:});  % no exponent, so update moth ticks and tick labels to manual
+            tickVals = get(hAxes,[ax 'Tick']);
+            tickStrs = get(hAxes,[ax 'TickLabel']);
+            try % Fix issue #236
+                exponents = [hAxes.([ax 'Axis']).SecondaryLabel];
+            catch
+                exponents = [hAxes.([ax 'Ruler']).SecondaryLabel];
+            end
+            if isempty([exponents.String])
+                % Fix for issue #205 - only set manual ticks when the Ticks number match the TickLabels number
+                if numel(tickVals) == numel(tickStrs)
+                    set(hAxes, props{:});  % no exponent and matching ticks, so update both ticks and tick labels to manual
+                end
             end
         catch  % probably HG1
-            set(hAxes, props{:});  % revert back to old behavior
+            % Fix for issue #220 - exponent is removed in HG1 when TickMode is 'manual' (internal Matlab bug)
+            if isequal(tickVals, str2num(tickStrs)') %#ok<ST2NM>
+                set(hAxes, props{:});  % revert back to old behavior
+            end
         end
     end
@@ -1332,2 +1608,38 @@
     end
 end
+
+function hBlackAxles = fixBlackAxle(hAxes, axleName)
+    hBlackAxles = [];
+    for idx = 1 : numel(hAxes)
+        ax = hAxes(idx);
+        axleColor = get(ax, axleName);
+        if isequal(axleColor,[0,0,0]) || isequal(axleColor,'k')
+            hBlackAxles(end+1) = ax; %#ok<AGROW>
+        end
+    end
+    set(hBlackAxles, axleName, [0,0,0.01]);  % off-black
+end
+
+% Issue #269: format-specific options
+function [optionsCells, bitDepth] = getFormatOptions(options, formatName)
+    bitDepth = [];
+    try
+        optionsStruct = options.format_options.(lower(formatName));
+    catch
+        % User did not specify any extra parameters for this format
+        optionsCells = {};
+        return
+    end
+    optionNames = fieldnames(optionsStruct);
+    optionVals  = struct2cell(optionsStruct);
+    optionsCells = [optionNames, optionVals]';
+    if nargout < 2, return, end  % bail out if BitDepth is not required
+    try
+        idx = find(strcmpi(optionNames,'BitDepth'), 1, 'last');
+        if ~isempty(idx)
+            bitDepth = optionVals{idx};
+        end
+    catch
+        % never mind - ignore
+    end
+end
Index: /issm/trunk/externalpackages/export_fig/im2gif.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/im2gif.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/im2gif.m	(revision 24686)
@@ -42,145 +42,164 @@
 % Copyright (C) Oliver Woodford 2011
 
+%{
+% 14/02/18: Merged issue #235: reduced memory usage, improved performance (thanks to @numb7rs)
+% 30/11/19: Merged issue #288: Fix im2gif.m for greyscale TIFF images (thanks @Blackbelt1221)
+%}
+
 function im2gif(A, varargin)
 
-% Parse the input arguments
-[A, options] = parse_args(A, varargin{:});
-
-if options.crop ~= 0
-    % Crop
-    A = crop_borders(A, A(ceil(end/2),1,:,1));
-end
-
-% Convert to indexed image
-[h, w, c, n] = size(A);
-A = reshape(permute(A, [1 2 4 3]), h, w*n, c);
-map = unique(reshape(A, h*w*n, c), 'rows');
-if size(map, 1) > 256
-    dither_str = {'dither', 'nodither'};
-    dither_str = dither_str{1+(options.dither==0)};
-    if options.ncolors <= 1
-        [B, map] = rgb2ind(A, options.ncolors, dither_str);
-        if size(map, 1) > 256
-            [B, map] = rgb2ind(A, 256, dither_str);
+    % Parse the input arguments
+    [A, options] = parse_args(A, varargin{:});
+
+    if options.crop ~= 0
+        % Crop
+        A = crop_borders(A, A(ceil(end/2),1,:,1));
+    end
+
+    % Convert to indexed image
+    [h, w, c, n] = size(A);
+
+    % Issue #235: Using unique(A,'rows') on the whole image stack at once causes
+    % massive memory usage when dealing with large images (at least on Matlab 2017b).
+    % Running unique(...) on individual frames, then again on the results drastically
+    % reduces the memory usage & slightly improves the execution time (@numb7rs).
+    uns = cell(1,size(A,4));
+    for nn=1:size(A,4)
+        uns{nn}=unique(reshape(A(:,:,:,nn), h*w, c),'rows');
+    end
+    map=unique(cell2mat(uns'),'rows');
+
+    A = reshape(permute(A, [1 2 4 3]), h, w*n, c);
+
+    if size(map, 1) > 256
+        dither_str = {'dither', 'nodither'};
+        dither_str = dither_str{1+(options.dither==0)};
+        if options.ncolors <= 1
+            [B, map] = rgb2ind(A, options.ncolors, dither_str);
+            if size(map, 1) > 256
+                [B, map] = rgb2ind(A, 256, dither_str);
+            end
+        else
+            [B, map] = rgb2ind(A, min(round(options.ncolors), 256), dither_str);
         end
     else
-        [B, map] = rgb2ind(A, min(round(options.ncolors), 256), dither_str);
-    end
-else
-    if max(map(:)) > 1
-        map = double(map) / 255;
-        A = double(A) / 255;
-    end
-    B = rgb2ind(im2double(A), map);
-end
-B = reshape(B, h, w, 1, n);
-
-% Bug fix to rgb2ind
-map(B(1)+1,:) = im2double(A(1,1,:));
-
-% Save as a gif
-imwrite(B, map, options.outfile, 'LoopCount', round(options.loops(1)), 'DelayTime', options.delay);
+        if max(map(:)) > 1
+            map = double(map) / 255;
+            A = double(A) / 255;
+        end
+        B = rgb2ind(im2double(A), map);
+    end
+    B = reshape(B, h, w, 1, n);
+
+    % Bug fix to rgb2ind
+    map(B(1)+1,:) = im2double(A(1,1,:));
+
+    % Save as a gif
+    imwrite(B, map, options.outfile, 'LoopCount', round(options.loops(1)), 'DelayTime', options.delay);
 end
 
 %% Parse the input arguments
 function [A, options] = parse_args(A, varargin)
-% Set the defaults
-options = struct('outfile', '', ...
-                 'dither', true, ...
-                 'crop', true, ...
-                 'ncolors', 256, ...
-                 'loops', 65535, ...
-                 'delay', 1/15);
-
-% Go through the arguments
-a = 0;
-n = numel(varargin);
-while a < n
-    a = a + 1;
-    if ischar(varargin{a}) && ~isempty(varargin{a})
-        if varargin{a}(1) == '-'
-            opt = lower(varargin{a}(2:end));
-            switch opt
-                case 'nocrop'
-                    options.crop = false;
-                case 'nodither'
-                    options.dither = false;
-                otherwise
-                    if ~isfield(options, opt)
-                        error('Option %s not recognized', varargin{a});
-                    end
-                    a = a + 1;
-                    if ischar(varargin{a}) && ~ischar(options.(opt))
-                        options.(opt) = str2double(varargin{a});
-                    else
-                        options.(opt) = varargin{a};
-                    end
-            end
-        else
-            options.outfile = varargin{a};
-        end
-    end
-end
-
-if isempty(options.outfile)
-    if ~ischar(A)
-        error('No output filename given.');
-    end
-    % Generate the output filename from the input filename
-    [path, outfile] = fileparts(A);
-    options.outfile = fullfile(path, [outfile '.gif']);
-end
-
-if ischar(A)
-    % Read in the image
-    A = imread_rgb(A);
-end
+    % Set the defaults
+    options = struct('outfile', '', ...
+                     'dither', true, ...
+                     'crop', true, ...
+                     'ncolors', 256, ...
+                     'loops', 65535, ...
+                     'delay', 1/15);
+
+    % Go through the arguments
+    a = 0;
+    n = numel(varargin);
+    while a < n
+        a = a + 1;
+        if ischar(varargin{a}) && ~isempty(varargin{a})
+            if varargin{a}(1) == '-'
+                opt = lower(varargin{a}(2:end));
+                switch opt
+                    case 'nocrop'
+                        options.crop = false;
+                    case 'nodither'
+                        options.dither = false;
+                    otherwise
+                        if ~isfield(options, opt)
+                            error('Option %s not recognized', varargin{a});
+                        end
+                        a = a + 1;
+                        if ischar(varargin{a}) && ~ischar(options.(opt))
+                            options.(opt) = str2double(varargin{a});
+                        else
+                            options.(opt) = varargin{a};
+                        end
+                end
+            else
+                options.outfile = varargin{a};
+            end
+        end
+    end
+
+    if isempty(options.outfile)
+        if ~ischar(A)
+            error('No output filename given.');
+        end
+        % Generate the output filename from the input filename
+        [path, outfile] = fileparts(A);
+        options.outfile = fullfile(path, [outfile '.gif']);
+    end
+
+    if ischar(A)
+        % Read in the image
+        A = imread_rgb(A);
+    end
 end
 
 %% Read image to uint8 rgb array
 function [A, alpha] = imread_rgb(name)
-% Get file info
-info = imfinfo(name);
-% Special case formats
-switch lower(info(1).Format)
-    case 'gif'
-        [A, map] = imread(name, 'frames', 'all');
-        if ~isempty(map)
-            map = uint8(map * 256 - 0.5); % Convert to uint8 for storage
-            A = reshape(map(uint32(A)+1,:), [size(A) size(map, 2)]); % Assume indexed from 0
-            A = permute(A, [1 2 5 4 3]);
-        end
-    case {'tif', 'tiff'}
-        A = cell(numel(info), 1);
-        for a = 1:numel(A)
-            [A{a}, map] = imread(name, 'Index', a, 'Info', info);
+    % Get file info
+    info = imfinfo(name);
+    % Special case formats
+    switch lower(info(1).Format)
+        case 'gif'
+            [A, map] = imread(name, 'frames', 'all');
             if ~isempty(map)
                 map = uint8(map * 256 - 0.5); % Convert to uint8 for storage
-                A{a} = reshape(map(uint32(A{a})+1,:), [size(A) size(map, 2)]); % Assume indexed from 0
-            end
-            if size(A{a}, 3) == 4
-                % TIFF in CMYK colourspace - convert to RGB
-                if isfloat(A{a})
-                    A{a} = A{a} * 255;
-                else
-                    A{a} = single(A{a});
+                A = reshape(map(uint32(A)+1,:), [size(A) size(map, 2)]); % Assume indexed from 0
+                A = permute(A, [1 2 5 4 3]);
+            end
+        case {'tif', 'tiff'}
+            A = cell(numel(info), 1);
+            for a = 1:numel(A)
+                [A{a}, map] = imread(name, 'Index', a, 'Info', info);
+                if ~isempty(map)
+                    map = uint8(map * 256 - 0.5); % Convert to uint8 for storage
+                    A{a} = reshape(map(uint32(A{a})+1,:), [size(A) size(map, 2)]); % Assume indexed from 0
                 end
-                A{a} = 255 - A{a};
-                A{a}(:,:,4) = A{a}(:,:,4) / 255;
-                A{a} = uint8(A(:,:,1:3) .* A{a}(:,:,[4 4 4]));
-            end
-        end
-        A = cat(4, A{:});
-    otherwise
-        [A, map, alpha] = imread(name);
-        A = A(:,:,:,1); % Keep only first frame of multi-frame files
-        if ~isempty(map)
-            map = uint8(map * 256 - 0.5); % Convert to uint8 for storage
-            A = reshape(map(uint32(A)+1,:), [size(A) size(map, 2)]); % Assume indexed from 0
-        elseif size(A, 3) == 4
-            % Assume 4th channel is an alpha matte
-            alpha = A(:,:,4);
-            A = A(:,:,1:3);
-        end
+                if size(A{a}, 3) == 4
+                    % TIFF in CMYK colourspace - convert to RGB
+                    if isfloat(A{a})
+                        A{a} = A{a} * 255;
+                    else
+                        A{a} = single(A{a});
+                    end
+                    A{a} = 255 - A{a};
+                    A{a}(:,:,4) = A{a}(:,:,4) / 255;
+                    A{a} = uint8(A(:,:,1:3) .* A{a}(:,:,[4 4 4]));
+                elseif size(A{a}, 3) < 3 %Check whether TIFF has been read in as greyscale
+                    %Convert from greyscale to RGB colorspace (issue #288)
+                    A{a} = cat(3, A{a}, A{a}, A{a});
+                end
+            end
+            A = cat(4, A{:});
+        otherwise
+            [A, map, alpha] = imread(name);
+            A = A(:,:,:,1); % Keep only first frame of multi-frame files
+            if ~isempty(map)
+                map = uint8(map * 256 - 0.5); % Convert to uint8 for storage
+                A = reshape(map(uint32(A)+1,:), [size(A) size(map, 2)]); % Assume indexed from 0
+            elseif size(A, 3) == 4
+                % Assume 4th channel is an alpha matte
+                alpha = A(:,:,4);
+                A = A(:,:,1:3);
+            end
+    end
 end
-end
Index: /issm/trunk/externalpackages/export_fig/license.txt
===================================================================
--- /issm/trunk/externalpackages/export_fig/license.txt	(revision 24685)
+++ 	(revision )
@@ -1,24 +1,0 @@
-Copyright (c) 2012, Oliver Woodford
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the distribution
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
Index: /issm/trunk/externalpackages/export_fig/pdf2eps.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/pdf2eps.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/pdf2eps.m	(revision 24686)
@@ -8,7 +8,7 @@
 % This function requires that you have pdftops, from the Xpdf suite of
 % functions, installed on your system. This can be downloaded from:
-% http://www.foolabs.com/xpdf  
+% http://xpdfreader.com
 %
-%IN:
+% Inputs:
 %   source - filename of the source pdf file to convert. The filename is
 %            assumed to already have the extension ".pdf".
@@ -16,36 +16,40 @@
 %          already have the extension ".eps".
 
-% Copyright (C) Oliver Woodford 2009-2010
+% Copyright (C) Oliver Woodford 2009-2010, Yair Altman 2015-
 
 % Thanks to Aldebaro Klautau for reporting a bug when saving to
 % non-existant directories.
 
+% 22/09/2018 - Xpdf website changed to xpdfreader.com
+
 function pdf2eps(source, dest)
-% Construct the options string for pdftops
-options = ['-q -paper match -eps -level2 "' source '" "' dest '"'];
-% Convert to eps using pdftops
-[status, message] = pdftops(options);
-% Check for error
-if status
-    % Report error
-    if isempty(message)
-        error('Unable to generate eps. Check destination directory is writable.');
-    else
-        error(message);
+    % Construct the options string for pdftops
+    options = ['-q -paper match -eps -level2 "' source '" "' dest '"'];
+
+    % Convert to eps using pdftops
+    [status, message] = pdftops(options);
+
+    % Check for error
+    if status
+        % Report error
+        if isempty(message)
+            error('Unable to generate eps. Check destination directory is writable.');
+        else
+            error(message);
+        end
     end
+
+    % Fix the DSC error created by pdftops
+    fid = fopen(dest, 'r+');
+    if fid == -1
+        % Cannot open the file
+        return
+    end
+    fgetl(fid); % Get the first line
+    str = fgetl(fid); % Get the second line
+    if strcmp(str(1:min(13, end)), '% Produced by')
+        fseek(fid, -numel(str)-1, 'cof');
+        fwrite(fid, '%'); % Turn ' ' into '%'
+    end
+    fclose(fid);
 end
-% Fix the DSC error created by pdftops
-fid = fopen(dest, 'r+');
-if fid == -1
-    % Cannot open the file
-    return
-end
-fgetl(fid); % Get the first line
-str = fgetl(fid); % Get the second line
-if strcmp(str(1:min(13, end)), '% Produced by')
-    fseek(fid, -numel(str)-1, 'cof');
-    fwrite(fid, '%'); % Turn ' ' into '%'
-end
-fclose(fid);
-end
-
Index: /issm/trunk/externalpackages/export_fig/pdftops.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/pdftops.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/pdftops.m	(revision 24686)
@@ -12,6 +12,5 @@
 %
 % This function requires that you have pdftops (from the Xpdf package)
-% installed on your system. You can download this from:
-% http://www.foolabs.com/xpdf
+% installed on your system. You can download this from: http://xpdfreader.com
 %
 % IN:
@@ -31,4 +30,6 @@
 % 02/05/2016 - Search additional possible paths suggested by Jonas Stein (issue #147)
 % 03/05/2016 - Display the specific error message if pdftops fails for some reason (issue #148)
+% 22/09/2018 - Xpdf website changed to xpdfreader.com; improved popup logic
+% 03/02/2019 - Fixed one-off 'pdftops not found' error after install (Mac/Linux) (issue #266)
 
     % Call pdftops
@@ -69,5 +70,5 @@
     % Ask the user to enter the path
     errMsg1 = 'Pdftops not found. Please locate the program, or install xpdf-tools from ';
-    url1 = 'http://foolabs.com/xpdf';
+    url1 = 'http://xpdfreader.com/download.html'; %='http://foolabs.com/xpdf';
     fprintf(2, '%s\n', [errMsg1 '<a href="matlab:web(''-browser'',''' url1 ''');">' url1 '</a>']);
     errMsg1 = [errMsg1 url1];
@@ -83,5 +84,5 @@
     errMsg2 = [errMsg2 url1];
 
-    state = 0;
+    state = 1;
     while 1
         if state
@@ -95,4 +96,5 @@
             case 'Install pdftops'
                 web('-browser',url1);
+                state = 0;
             case 'Issue #137'
                 web('-browser',url2);
@@ -141,8 +143,9 @@
     % system returns good = 1 even when the command runs
     % Look for something distinct in the help text
-    good = ~isempty(strfind(message, 'PostScript'));
+    good = ~isempty(strfind(message, 'PostScript')); %#ok<STREMP>
 
     % Display the error message if the pdftops executable exists but fails for some reason
-    if ~good && exist(path_,'file')  % file exists but generates an error
+    % Note: on Mac/Linux, exist('pdftops','file') will always return 2 due to pdftops.m => check for '/','.' (issue #266)
+    if ~good && exist(path_,'file') && ~isempty(regexp(path_,'[/.]')) %#ok<RGXP1> % file exists but generates an error
         fprintf('Error running %s:\n', path_);
         fprintf(2,'%s\n\n',message);
Index: /issm/trunk/externalpackages/export_fig/print2array.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/print2array.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/print2array.m	(revision 24686)
@@ -51,4 +51,5 @@
 % 07/07/15: Fixed issue #83: use numeric handles in HG1
 % 11/12/16: Fixed cropping issue reported by Harry D.
+% 29/09/18: Fixed issue #254: error in print2array>read_tif_img
 %}
 
@@ -208,4 +209,5 @@
 % Function to create a TIF image of the figure and read it into an array
 function [A, err, ex] = read_tif_img(fig, res_str, renderer, tmp_nam)
+    A =  [];  % fix for issue #254
     err = false;
     ex = [];
Index: /issm/trunk/externalpackages/export_fig/print2eps.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/print2eps.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/print2eps.m	(revision 24686)
@@ -14,5 +14,5 @@
 % where these have been changed by MATLAB, for up to 11 different fonts.
 %
-%IN:
+% Inputs:
 %   filename - string containing the name (optionally including full or
 %              relative path) of the file the figure is to be saved as. A
@@ -26,4 +26,8 @@
 %       crop       - Cropping flag. Deafult: 0
 %       fontswap   - Whether to swap non-default fonts in figure. Default: true
+%       preserve_size - Whether to preserve the figure's PaperSize. Default: false
+%       font_space - Character used to separate font-name terms in the EPS output
+%                    e.g. "Courier New" => "Courier-New". Default: ''
+%                    (available only via the struct alternative)
 %       renderer   - Renderer used to generate bounding-box. Default: 'opengl'
 %                    (available only via the struct alternative)
@@ -89,4 +93,15 @@
 % 12/06/16: Improved the fix for issue #159 (in the previous commit)
 % 12/06/16: Fixed issue #158: transparent patch color in PDF/EPS
+% 18/09/17: Fixed issue #194: incorrect fonts in EPS/PDF output
+% 18/09/17: Fixed issue #195: relaxed too-tight cropping in EPS/PDF
+% 14/11/17: Workaround for issue #211: dashed/dotted lines in 3D axes appear solid
+% 15/11/17: Updated issue #211: only set SortMethod='ChildOrder' in HG2, and when it looks the same onscreen; support multiple figure axes
+% 18/11/17: Fixed issue #225: transparent/translucent dashed/dotted lines appear solid in EPS/PDF
+% 24/03/18: Fixed issue #239: black title meshes with temporary black background figure bgcolor, causing bad cropping
+% 21/03/19: Improvement for issue #258: missing fonts in output EPS/PDF (still *NOT* fully solved)
+% 21/03/19: Fixed issues #166,#251: Arial font is no longer replaced with Helvetica but rather treated as a non-standard user font
+% 14/05/19: Made Helvetica the top default font-swap, replacing Courier
+% 12/06/19: Issue #277: Enabled preservation of figure's PaperSize in output PDF/EPS file
+% 06/08/19: Issue #281: only fix patch/textbox color if it's not opaque
 %}
 
@@ -104,11 +119,19 @@
     crop_amounts = nan(1,4);  % auto-crop all 4 sides by default
     if isstruct(export_options)
-        try fontswap     = export_options.fontswap;     catch, fontswap = true;     end
-        try bb_crop      = export_options.crop;         catch, bb_crop = 0;         end
-        try crop_amounts = export_options.crop_amounts; catch,                      end
-        try bb_padding   = export_options.bb_padding;   catch, bb_padding = 0;      end
-        try renderer     = export_options.rendererStr;  catch, renderer = 'opengl'; end  % fix for issue #110
+        try preserve_size = export_options.preserve_size; catch, preserve_size = false; end
+        try fontswap      = export_options.fontswap;      catch, fontswap = true;       end
+        try font_space    = export_options.font_space;    catch, font_space = '';       end
+        font_space(2:end) = '';
+        try bb_crop       = export_options.crop;          catch, bb_crop = 0;           end
+        try crop_amounts  = export_options.crop_amounts;  catch,                        end
+        try bb_padding    = export_options.bb_padding;    catch, bb_padding = 0;        end
+        try renderer      = export_options.rendererStr;   catch, renderer = 'opengl';   end  % fix for issue #110
         if renderer(1)~='-',  renderer = ['-' renderer];  end
     else
+        if numel(export_options) > 3  % preserve_size
+            preserve_size = export_options(4);
+        else
+            preserve_size = false;
+        end
         if numel(export_options) > 2  % font-swapping
             fontswap = export_options(3);
@@ -127,4 +150,5 @@
         end
         renderer = '-opengl';
+        font_space = '';
     end
 
@@ -135,7 +159,8 @@
 
     % Set paper size
-    old_pos_mode = get(fig, 'PaperPositionMode');
+    old_pos_mode    = get(fig, 'PaperPositionMode');
     old_orientation = get(fig, 'PaperOrientation');
-    set(fig, 'PaperPositionMode', 'auto', 'PaperOrientation', 'portrait');
+    old_paper_units = get(fig, 'PaperUnits');
+    set(fig, 'PaperPositionMode','auto', 'PaperOrientation','portrait', 'PaperUnits','points');
 
     % Find all the used fonts in the figure
@@ -155,7 +180,7 @@
         switch f
             case {'times', 'timesnewroman', 'times-roman'}
-                fontsl{a} = 'times-roman';
-            case {'arial', 'helvetica'}
-                fontsl{a} = 'helvetica';
+                fontsl{a} = 'times';
+            %case {'arial', 'helvetica'}  % issues #166, #251
+            %    fontsl{a} = 'helvetica';
             case {'newcenturyschoolbook', 'newcenturyschlbk'}
                 fontsl{a} = 'newcenturyschlbk';
@@ -167,6 +192,11 @@
     % Determine the font swap table
     if fontswap
-        matlab_fonts = {'Helvetica', 'Times-Roman', 'Palatino', 'Bookman', 'Helvetica-Narrow', 'Symbol', ...
-                        'AvantGarde', 'NewCenturySchlbk', 'Courier', 'ZapfChancery', 'ZapfDingbats'};
+        % Issue #258: Rearrange standard fonts list based on descending "problematicness"
+        % The issue is still *NOT* fully solved because I cannot figure out how to force
+        % the EPS postscript engine to look for the user's font on disk
+        % Also see: https://stat.ethz.ch/pipermail/r-help/2005-January/064374.html
+        matlab_fonts = {'Helvetica', 'Times', 'Courier', 'Symbol', 'ZapfDingbats', ...
+                        'Palatino', 'Bookman', 'ZapfChancery', 'AvantGarde', ...
+                        'NewCenturySchlbk', 'Helvetica-Narrow'};
         matlab_fontsl = lower(matlab_fonts);
         require_swap = find(~ismember(fontslu, matlab_fontsl));
@@ -211,5 +241,5 @@
         % Compute the order to revert fonts later, without the need of a loop
         [update, M] = unique(update(1:c));
-        [M, M] = sort(M);
+        [dummy, M] = sort(M); %#ok<ASGLU>
         update = reshape(update(M), 1, []);
     end
@@ -228,4 +258,34 @@
     % Set the line color slightly off white
     set(white_line_handles, 'Color', [1 1 1] - 0.00001);
+
+    % MATLAB bug fix (issue #211): dashed/dotted lines in 3D axes appear solid
+    % Note: this "may limit other functionality in plotting such as hidden line/surface removal"
+    % reference: Technical Support Case #02838114, https://mail.google.com/mail/u/0/#inbox/15fb7659f70e7bd8
+    hAxes = findall(fig, 'Type', 'axes');
+    if using_hg2 && ~isempty(hAxes)  % issue #211 presumably happens only in HG2, not HG1
+        try
+            % If there are any axes using SortMethod~='ChildOrder'
+            oldSortMethods = get(hAxes,{'SortMethod'});  % use {'SortMethod'} to ensure we get a cell array, even for single axes
+            if any(~strcmpi('ChildOrder',oldSortMethods))  % i.e., any oldSortMethods=='depth'
+                % Check if the axes look visually different onscreen when SortMethod='ChildOrder'
+                imgBefore = print2array(fig);
+                set(hAxes,'SortMethod','ChildOrder');
+                imgAfter  = print2array(fig);
+                if isequal(imgBefore, imgAfter)
+                    % They look the same, so use SortMethod='ChildOrder' when generating the EPS
+                else
+                    % They look different, so revert SortMethod and issue a warning message
+                    warning('YMA:export_fig:issue211', ...
+                            ['You seem to be using axes that have overlapping/hidden graphic elements. ' 10 ...
+                             'Setting axes.SortMethod=''ChildOrder'' may solve potential problems in EPS/PDF export. ' 10 ...
+                             'Additional info: https://github.com/altmany/export_fig/issues/211'])
+                    set(hAxes,{'SortMethod'},oldSortMethods);
+                end
+            end
+        catch err
+            % ignore
+            a=err;  %#ok<NASGU> % debug breakpoint
+        end
+    end
 
     % Workaround for issue #45: lines in image subplots are exported in invalid color
@@ -259,4 +319,7 @@
     print(fig, options{:}, name);
 
+    % Restore the original axes SortMethods (if updated)
+    try set(hAxes,{'SortMethod'},oldSortMethods); catch, end
+
     % Do post-processing on the eps file
     try
@@ -291,7 +354,13 @@
     end
 
+    % Bail out if EPS post-processing is not possible
+    if isempty(fstrm)
+        warning('Loading EPS file failed, so unable to perform post-processing. This is usually because the figure contains a large number of patch objects. Consider exporting to a bitmap format in this case.');
+        return
+    end
+
     % Fix for Matlab R2014b bug (issue #31): LineWidths<0.75 are not set in the EPS (default line width is used)
     try
-        if ~isempty(fstrm) && using_hg2(fig)
+        if using_hg2(fig)
             % Convert miter joins to line joins
             %fstrm = regexprep(fstrm, '\n10.0 ML\n', '\n1 LJ\n');
@@ -376,6 +445,13 @@
     set(white_line_handles, 'Color', [1 1 1]);
 
+    % Preserve the figure's PaperSize in the output file, if requested (issue #277)
+    if preserve_size
+        % https://stackoverflow.com/questions/19646329/postscript-document-size
+        paper_size = get(fig, 'PaperSize');  % in [points]
+        fstrm = sprintf('<< /PageSize [%d %d] >> setpagedevice\n%s', paper_size, fstrm);
+    end
+
     % Reset paper size
-    set(fig, 'PaperPositionMode', old_pos_mode, 'PaperOrientation', old_orientation);
+    set(fig, 'PaperPositionMode',old_pos_mode, 'PaperOrientation',old_orientation, 'PaperUnits',old_paper_units);
 
     % Reset the font names in the figure
@@ -386,15 +462,25 @@
     end
 
-    % Bail out if EPS post-processing is not possible
-    if isempty(fstrm)
-        warning('Loading EPS file failed, so unable to perform post-processing. This is usually because the figure contains a large number of patch objects. Consider exporting to a bitmap format in this case.');
-        return
-    end
-
     % Replace the font names
     if ~isempty(font_swap)
         for a = 1:size(font_swap, 2)
-            %fstrm = regexprep(fstrm, [font_swap{1,a} '-?[a-zA-Z]*\>'], font_swap{3,a}(~isspace(font_swap{3,a})));
-            fstrm = regexprep(fstrm, font_swap{2,a}, font_swap{3,a}(~isspace(font_swap{3,a})));
+            fontName = font_swap{3,a};
+            %fontName = fontName(~isspace(font_swap{3,a}));
+            if length(fontName) > 29
+                warning('YMA:export_fig:font_name','Font name ''%s'' is longer than 29 characters. This might cause problems in some EPS/PDF readers. Consider using a different font.',fontName);
+            end
+            if isempty(font_space)
+                fontName(fontName==' ') = '';
+            else
+                fontName(fontName==' ') = char(font_space);
+            end
+
+            % Replace all instances of the standard Matlab fonts with the original user's font names
+            %fstrm = regexprep(fstrm, [font_swap{1,a} '-?[a-zA-Z]*\>'], fontName);
+            %fstrm = regexprep(fstrm, [font_swap{2,a} '([ \n])'], [fontName '$1']);
+            %fstrm = regexprep(fstrm, font_swap{2,a}, fontName);  % also replace -Bold, -Italic, -BoldItalic
+
+            % Times-Roman's Bold/Italic fontnames don't include '-Roman'
+            fstrm = regexprep(fstrm, [font_swap{2,a} '(\-Roman)?'], fontName);
         end
     end
@@ -426,8 +512,28 @@
         pagebb_matlab = cell2mat(textscan(aa,'%f32%f32%f32%f32'));  % dimensions bb - STEP2
 
+        % 1b. Fix issue #239: black title meshes with temporary black background figure bgcolor, causing bad cropping
+        hTitles = [];
+        if isequal(get(fig,'Color'),'none')
+            hAxes = findall(fig,'type','axes');
+            for idx = 1 : numel(hAxes)
+                hAx = hAxes(idx);
+                try
+                    hTitle = hAx.Title;
+                    oldColor = hTitle.Color;
+                    if all(oldColor < 5*eps) || (ischar(oldColor) && lower(oldColor(1))=='k')
+                        hTitles(end+1) = hTitle; %#ok<AGROW>
+                        hTitle.Color = [0,0,.01];
+                    end
+                catch
+                end
+            end
+        end
+
         % 2. Create a bitmap image and use crop_borders to create the relative
         %    bb with respect to the PageBoundingBox
         [A, bcol] = print2array(fig, 1, renderer);
-        [aa, aa, aa, bb_rel] = crop_borders(A, bcol, bb_padding, crop_amounts);
+        [aa, aa, aa, bb_rel] = crop_borders(A, bcol, bb_padding, crop_amounts); %#ok<ASGLU>
+
+        try set(hTitles,'Color','k'); catch, end
 
         % 3. Calculate the new Bounding Box
@@ -437,5 +543,5 @@
         %          pagebb_matlab(1)+pagew*bb_rel(3) pagebb_matlab(2)+pageh*bb_rel(4)];
         bb_new = pagebb_matlab([1,2,1,2]) + [pagew,pageh,pagew,pageh].*bb_rel;  % clearer
-        bb_offset = (bb_new-bb_matlab) + [-1,-1,1,1];  % 1px margin so that cropping is not TOO tight
+        bb_offset = (bb_new-bb_matlab) + [-2,-2,2,2];  % 2px margin so that cropping is not TOO tight (issue #195)
 
         % Apply the bounding box padding
@@ -444,7 +550,7 @@
                 bb_padding = round((mean([bb_new(3)-bb_new(1) bb_new(4)-bb_new(2)])*bb_padding)/0.5)*0.5; % ADJUST BB_PADDING
             end
-            add_padding = @(n1, n2, n3, n4) sprintf(' %d', str2double({n1, n2, n3, n4}) + [-bb_padding -bb_padding bb_padding bb_padding] + bb_offset);
+            add_padding = @(n1, n2, n3, n4) sprintf(' %.0f', str2double({n1, n2, n3, n4}) + bb_offset + bb_padding*[-1,-1,1,1]); %#ok<NASGU>
         else
-            add_padding = @(n1, n2, n3, n4) sprintf(' %d', str2double({n1, n2, n3, n4}) + bb_offset); % fix small but noticeable bounding box shift
+            add_padding = @(n1, n2, n3, n4) sprintf(' %.0f', str2double({n1, n2, n3, n4}) + bb_offset); %#ok<NASGU> % fix small but noticeable bounding box shift
         end
         fstrm = regexprep(fstrm, '%%BoundingBox:[ ]+([-]?\d+)[ ]+([-]?\d+)[ ]+([-]?\d+)[ ]+([-]?\d+)', '%%BoundingBox:${add_padding($1, $2, $3, $4)}');
@@ -478,9 +584,11 @@
                     propName = propNames{propIdx};
                     if strcmp(hObj.(propName).ColorType, 'truecoloralpha')
-                        nColors = length(StoredColors);
                         oldColor = hObj.(propName).ColorData;
-                        newColor = uint8([101; 102+floor(nColors/255); mod(nColors,255); 255]);
-                        StoredColors{end+1} = {hObj, propName, oldColor, newColor};
-                        hObj.(propName).ColorData = newColor;
+                        if numel(oldColor)>3 && oldColor(4)~=255  % issue #281: only fix patch/textbox color if it's not opaque
+                            nColors = length(StoredColors);
+                            newColor = uint8([101; 102+floor(nColors/255); mod(nColors,255); 255]);
+                            StoredColors{end+1} = {hObj, propName, oldColor, newColor}; %#ok<AGROW>
+                            hObj.(propName).ColorData = newColor;
+                        end
                     end
                 catch
@@ -508,11 +616,11 @@
                 %Find and replace the RGBA values within the EPS text fstrm
                 if strcmpi(propName,'Face')
-                    oldStr = sprintf(['\n' colorID ' RC\nN\n']);
-                    newStr = sprintf(['\n' origRGB ' RC\n' origAlpha ' .setopacityalpha true\nN\n']);
+                    oldStr = sprintf(['\n' colorID ' RC\n']);  % ...N\n (removed to fix issue #225)
+                    newStr = sprintf(['\n' origRGB ' RC\n' origAlpha ' .setopacityalpha true\n']);  % ...N\n
                 else  %'Edge'
-                    oldStr = sprintf(['\n' colorID ' RC\n1 LJ\n']);
+                    oldStr = sprintf(['\n' colorID ' RC\n']);  % ...1 LJ\n (removed to fix issue #225)
                     newStr = sprintf(['\n' origRGB ' RC\n' origAlpha ' .setopacityalpha true\n']);
                 end
-                foundFlags(objIdx) = ~isempty(strfind(fstrm, oldStr));
+                foundFlags(objIdx) = ~isempty(strfind(fstrm, oldStr)); %#ok<STREMP>
                 fstrm = strrep(fstrm, oldStr, newStr);
 
Index: /issm/trunk/externalpackages/export_fig/user_string.m
===================================================================
--- /issm/trunk/externalpackages/export_fig/user_string.m	(revision 24685)
+++ /issm/trunk/externalpackages/export_fig/user_string.m	(revision 24686)
@@ -33,4 +33,5 @@
 %              errors. Thanks to Christian for pointing this out.
 % 29/05/2015 - Save file in prefdir if current folder is non-writable (issue #74)
+% 09/01/2018 - Fix issue #232: if the string looks like a file/folder path, ensure it actually exists
 
     if ~ischar(string_name)
@@ -102,4 +103,9 @@
         string = fgetl(fid);
         fclose(fid);
+
+        % Fix issue #232: if the string looks like a file/folder path, ensure it actually exists
+        if ~isempty(string) && any(string=='\' | string=='/') && ~exist(string) %#ok<EXIST>
+            string = '';
+        end
     end
 end
Index: /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/frmts/wms/dataset.cpp
===================================================================
--- /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/frmts/wms/dataset.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/frmts/wms/dataset.cpp	(revision 24686)
@@ -0,0 +1,676 @@
+/******************************************************************************
+ * $Id: dataset.cpp 25776 2013-03-20 20:46:48Z rouault $
+ *
+ * Project:  WMS Client Driver
+ * Purpose:  Implementation of Dataset and RasterBand classes for WMS
+ *           and other similar services.
+ * Author:   Adam Nowacki, nowak@xpam.de
+ *
+ ******************************************************************************
+ * Copyright (c) 2007, Adam Nowacki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ****************************************************************************
+ *
+ * dataset.cpp:
+ * Initialization of the GDALWMSdriver, parsing the XML configuration file,
+ * instantiation of the minidrivers and accessors used by minidrivers
+ *
+ ***************************************************************************/
+
+
+#include "stdinc.h"
+
+GDALWMSDataset::GDALWMSDataset() {
+    m_mini_driver = 0;
+    m_cache = 0;
+    m_hint.m_valid = false;
+    m_data_type = GDT_Byte;
+    m_clamp_requests = true;
+    m_unsafeSsl = false;
+    m_data_window.m_sx = -1;
+    nBands = 0;
+    m_default_block_size_x = 1024;
+    m_default_block_size_y = 1024;
+    m_bNeedsDataWindow = TRUE;
+    m_default_tile_count_x = 1;
+    m_default_tile_count_y = 1;
+    m_default_overview_count = -1;
+    m_zeroblock_on_serverexceptions = 0;
+    m_poColorTable = NULL;
+}
+
+GDALWMSDataset::~GDALWMSDataset() {
+    if (m_mini_driver) delete m_mini_driver;
+    if (m_cache) delete m_cache;
+    if (m_poColorTable) delete m_poColorTable;
+}
+
+CPLErr GDALWMSDataset::Initialize(CPLXMLNode *config) {
+    CPLErr ret = CE_None;
+
+    char* pszXML = CPLSerializeXMLTree( config );
+    if (pszXML)
+    {
+        m_osXML = pszXML;
+        CPLFree(pszXML);
+    }
+
+    // Initialize the minidriver, which can set parameters for the dataset using member functions
+    CPLXMLNode *service_node = CPLGetXMLNode(config, "Service");
+    if (service_node != NULL)
+    {
+        const CPLString service_name = CPLGetXMLValue(service_node, "name", "");
+        if (!service_name.empty())
+        {
+            GDALWMSMiniDriverManager *const mdm = GetGDALWMSMiniDriverManager();
+            GDALWMSMiniDriverFactory *const mdf = mdm->Find(service_name);
+            if (mdf != NULL)
+            {
+                m_mini_driver = mdf->New();
+                m_mini_driver->m_parent_dataset = this;
+                if (m_mini_driver->Initialize(service_node) == CE_None)
+                {
+                    m_mini_driver_caps.m_capabilities_version = -1;
+                    m_mini_driver->GetCapabilities(&m_mini_driver_caps);
+                    if (m_mini_driver_caps.m_capabilities_version == -1)
+                    {
+                        CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Internal error, mini-driver capabilities version not set.");
+                        ret = CE_Failure;
+                    }
+                }
+                else
+                {
+                    delete m_mini_driver;
+                    m_mini_driver = NULL;
+
+                    CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Failed to initialize minidriver.");
+                    ret = CE_Failure;
+                }
+            }
+            else
+            {
+                CPLError(CE_Failure, CPLE_AppDefined,
+                                "GDALWMS: No mini-driver registered for '%s'.", service_name.c_str());
+                ret = CE_Failure;
+            }
+        }
+        else
+        {
+            CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: No Service specified.");
+            ret = CE_Failure;
+        }
+    }
+    else
+    {
+        CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: No Service specified.");
+        ret = CE_Failure;
+    }
+
+
+    /*
+    Parameters that could be set by minidriver already, based on server side information.
+    If the size is set, minidriver has done this already
+    A "server" side minidriver needs to set at least:
+      - Blocksize (x and y)
+      - Clamp flag (defaults to true)
+      - DataWindow
+      - Band Count
+      - Data Type
+    It should also initialize and register the bands and overviews.
+    */
+
+    if (m_data_window.m_sx<1)
+    {
+        int nOverviews = 0;
+
+        if (ret == CE_None)
+        {
+            m_block_size_x = atoi(CPLGetXMLValue(config, "BlockSizeX", CPLString().Printf("%d", m_default_block_size_x)));
+            m_block_size_y = atoi(CPLGetXMLValue(config, "BlockSizeY", CPLString().Printf("%d", m_default_block_size_y)));
+            if (m_block_size_x <= 0 || m_block_size_y <= 0)
+            {
+                CPLError( CE_Failure, CPLE_AppDefined, "GDALWMS: Invalid value in BlockSizeX or BlockSizeY" );
+                ret = CE_Failure;
+            }
+        }
+
+        if (ret == CE_None)
+        {
+            m_clamp_requests = StrToBool(CPLGetXMLValue(config, "ClampRequests", "true"));
+            if (m_clamp_requests<0)
+            {
+                CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Invalid value of ClampRequests, true/false expected.");
+                ret = CE_Failure;
+            }
+        }
+
+        if (ret == CE_None)
+        {
+            CPLXMLNode *data_window_node = CPLGetXMLNode(config, "DataWindow");
+            if (data_window_node == NULL && m_bNeedsDataWindow)
+            {
+                CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: DataWindow missing.");
+                ret = CE_Failure;
+            }
+            else
+            {
+                CPLString osDefaultX0, osDefaultX1, osDefaultY0, osDefaultY1;
+                CPLString osDefaultTileCountX, osDefaultTileCountY, osDefaultTileLevel;
+                CPLString osDefaultOverviewCount;
+                osDefaultX0.Printf("%.8f", m_default_data_window.m_x0);
+                osDefaultX1.Printf("%.8f", m_default_data_window.m_x1);
+                osDefaultY0.Printf("%.8f", m_default_data_window.m_y0);
+                osDefaultY1.Printf("%.8f", m_default_data_window.m_y1);
+                osDefaultTileCountX.Printf("%d", m_default_tile_count_x);
+                osDefaultTileCountY.Printf("%d", m_default_tile_count_y);
+                if (m_default_data_window.m_tlevel >= 0)
+                    osDefaultTileLevel.Printf("%d", m_default_data_window.m_tlevel);
+                if (m_default_overview_count >= 0)
+                    osDefaultOverviewCount.Printf("%d", m_default_overview_count);
+                const char *overview_count = CPLGetXMLValue(config, "OverviewCount", osDefaultOverviewCount);
+                const char *ulx = CPLGetXMLValue(data_window_node, "UpperLeftX", osDefaultX0);
+                const char *uly = CPLGetXMLValue(data_window_node, "UpperLeftY", osDefaultY0);
+                const char *lrx = CPLGetXMLValue(data_window_node, "LowerRightX", osDefaultX1);
+                const char *lry = CPLGetXMLValue(data_window_node, "LowerRightY", osDefaultY1);
+                const char *sx = CPLGetXMLValue(data_window_node, "SizeX", "");
+                const char *sy = CPLGetXMLValue(data_window_node, "SizeY", "");
+                const char *tx = CPLGetXMLValue(data_window_node, "TileX", "0");
+                const char *ty = CPLGetXMLValue(data_window_node, "TileY", "0");
+                const char *tlevel = CPLGetXMLValue(data_window_node, "TileLevel", osDefaultTileLevel);
+                const char *str_tile_count_x = CPLGetXMLValue(data_window_node, "TileCountX", osDefaultTileCountX);
+                const char *str_tile_count_y = CPLGetXMLValue(data_window_node, "TileCountY", osDefaultTileCountY);
+                const char *y_origin = CPLGetXMLValue(data_window_node, "YOrigin", "default");
+
+                if (ret == CE_None)
+                {
+                    if ((ulx[0] != '\0') && (uly[0] != '\0') && (lrx[0] != '\0') && (lry[0] != '\0'))
+                    {
+                        m_data_window.m_x0 = atof(ulx);
+                        m_data_window.m_y0 = atof(uly);
+                        m_data_window.m_x1 = atof(lrx);
+                        m_data_window.m_y1 = atof(lry);
+                    }
+                    else
+                    {
+                        CPLError(CE_Failure, CPLE_AppDefined,
+                                 "GDALWMS: Mandatory elements of DataWindow missing: UpperLeftX, UpperLeftY, LowerRightX, LowerRightY.");
+                        ret = CE_Failure;
+                    }
+                }
+
+                m_data_window.m_tlevel = atoi(tlevel);
+
+                if (ret == CE_None)
+                {
+                    if ((sx[0] != '\0') && (sy[0] != '\0'))
+                    {
+                        m_data_window.m_sx = atoi(sx);
+                        m_data_window.m_sy = atoi(sy);
+                    }
+                    else if ((tlevel[0] != '\0') && (str_tile_count_x[0] != '\0') && (str_tile_count_y[0] != '\0'))
+                    {
+                        int tile_count_x = atoi(str_tile_count_x);
+                        int tile_count_y = atoi(str_tile_count_y);
+                        m_data_window.m_sx = tile_count_x * m_block_size_x * (1 << m_data_window.m_tlevel);
+                        m_data_window.m_sy = tile_count_y * m_block_size_y * (1 << m_data_window.m_tlevel);
+                    }
+                    else
+                    {
+                        CPLError(CE_Failure, CPLE_AppDefined,
+                                 "GDALWMS: Mandatory elements of DataWindow missing: SizeX, SizeY.");
+                        ret = CE_Failure;
+                    }
+                }
+                if (ret == CE_None)
+                {
+                    if ((tx[0] != '\0') && (ty[0] != '\0'))
+                    {
+                        m_data_window.m_tx = atoi(tx);
+                        m_data_window.m_ty = atoi(ty);
+                    }
+                    else
+                    {
+                        CPLError(CE_Failure, CPLE_AppDefined,
+                                 "GDALWMS: Mandatory elements of DataWindow missing: TileX, TileY.");
+                        ret = CE_Failure;
+                    }
+                }
+
+                if (ret == CE_None)
+                {
+                    if (overview_count[0] != '\0')
+                    {
+                        nOverviews = atoi(overview_count);
+                    }
+                    else if (tlevel[0] != '\0')
+                    {
+                        nOverviews = m_data_window.m_tlevel;
+                    }
+                    else
+                    {
+                        const int min_overview_size = MAX(32, MIN(m_block_size_x, m_block_size_y));
+                        double a = log(static_cast<double>(MIN(m_data_window.m_sx, m_data_window.m_sy))) / log(2.0)
+                            - log(static_cast<double>(min_overview_size)) / log(2.0);
+                        nOverviews = MAX(0, MIN(static_cast<int>(ceil(a)), 32));
+                    }
+                }
+                if (ret == CE_None)
+                {
+                    CPLString y_origin_str = y_origin;
+                    if (y_origin_str == "top") {
+                        m_data_window.m_y_origin = GDALWMSDataWindow::TOP;
+                    } else if (y_origin_str == "bottom") {
+                        m_data_window.m_y_origin = GDALWMSDataWindow::BOTTOM;
+                    } else if (y_origin_str == "default") {
+                        m_data_window.m_y_origin = GDALWMSDataWindow::DEFAULT;
+                    } else {
+                        CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: DataWindow YOrigin must be set to "
+                            "one of 'default', 'top', or 'bottom', not '%s'.", y_origin_str.c_str());
+                        ret = CE_Failure;
+                    }
+                }
+            }
+        }
+
+        if (ret == CE_None)
+        {
+            if (nBands<1)
+                nBands=atoi(CPLGetXMLValue(config,"BandsCount","3"));
+            if (nBands<1)
+            {
+                CPLError(CE_Failure, CPLE_AppDefined,
+                         "GDALWMS: Bad number of bands.");
+                ret = CE_Failure;
+            }
+        }
+
+        if (ret == CE_None)
+        {
+            const char *data_type = CPLGetXMLValue(config, "DataType", "Byte");
+            m_data_type = GDALGetDataTypeByName( data_type );
+            if ( m_data_type == GDT_Unknown || m_data_type >= GDT_TypeCount )
+            {
+                CPLError( CE_Failure, CPLE_AppDefined,
+                          "GDALWMS: Invalid value in DataType. Data type \"%s\" is not supported.", data_type );
+                ret = CE_Failure;
+            }
+        }
+
+        // Initialize the bands and the overviews.  Assumes overviews are powers of two
+        if (ret == CE_None)
+        {
+            nRasterXSize = m_data_window.m_sx;
+            nRasterYSize = m_data_window.m_sy;
+
+            if (!GDALCheckDatasetDimensions(nRasterXSize, nRasterYSize) ||
+                !GDALCheckBandCount(nBands, TRUE))
+            {
+                return CE_Failure;
+            }
+
+            GDALColorInterp default_color_interp[4][4] = {
+                { GCI_GrayIndex, GCI_Undefined, GCI_Undefined, GCI_Undefined },
+                { GCI_GrayIndex, GCI_AlphaBand, GCI_Undefined, GCI_Undefined },
+                { GCI_RedBand, GCI_GreenBand, GCI_BlueBand, GCI_Undefined },
+                { GCI_RedBand, GCI_GreenBand, GCI_BlueBand, GCI_AlphaBand }
+            };
+            for (int i = 0; i < nBands; ++i)
+            {
+                GDALColorInterp color_interp = (nBands <= 4 && i <= 3 ? default_color_interp[nBands - 1][i] : GCI_Undefined);
+                GDALWMSRasterBand *band = new GDALWMSRasterBand(this, i, 1.0);
+                band->m_color_interp = color_interp;
+                SetBand(i + 1, band);
+                double scale = 0.5;
+                for (int j = 0; j < nOverviews; ++j)
+                {
+                    band->AddOverview(scale);
+                    band->m_color_interp = color_interp;
+                    scale *= 0.5;
+                }
+            }
+        }
+    }
+
+    // UserPwd
+    const char *pszUserPwd = CPLGetXMLValue(config, "UserPwd", "");
+    if (pszUserPwd[0] != '\0')
+        m_osUserPwd = pszUserPwd;
+
+    const char *pszUserAgent = CPLGetXMLValue(config, "UserAgent", "");
+    if (pszUserAgent[0] != '\0')
+        m_osUserAgent = pszUserAgent;
+
+    const char *pszReferer = CPLGetXMLValue(config, "Referer", "");
+    if (pszReferer[0] != '\0')
+        m_osReferer = pszReferer;
+
+    if (ret == CE_None) {
+        const char *pszHttpZeroBlockCodes = CPLGetXMLValue(config, "ZeroBlockHttpCodes", "");
+        if(pszHttpZeroBlockCodes[0] == '\0') {
+            m_http_zeroblock_codes.push_back(204);
+        } else {
+            char **kv = CSLTokenizeString2(pszHttpZeroBlockCodes,",",CSLT_HONOURSTRINGS);
+            int nCount = CSLCount(kv);
+            for(int i=0; i<nCount; i++) {
+                int code = atoi(kv[i]);
+                if(code <= 0) {
+                    CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Invalid value of ZeroBlockHttpCodes \"%s\", comma separated HTTP response codes expected.",
+                            kv[i]);
+                    ret = CE_Failure;
+                    break;
+                }
+                m_http_zeroblock_codes.push_back(code);
+            }
+            CSLDestroy(kv);
+        }
+    }
+
+    if (ret == CE_None) {
+        const char *pszZeroExceptions = CPLGetXMLValue(config, "ZeroBlockOnServerException", "");
+        if(pszZeroExceptions[0] != '\0') {
+            m_zeroblock_on_serverexceptions = StrToBool(pszZeroExceptions);
+            if (m_zeroblock_on_serverexceptions == -1) {
+                CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Invalid value of ZeroBlockOnServerException \"%s\", true/false expected.",
+                     pszZeroExceptions);
+                ret = CE_Failure;
+            }
+        }
+    }
+
+    if (ret == CE_None) {
+        const char *max_conn = CPLGetXMLValue(config, "MaxConnections", "");
+        if (max_conn[0] != '\0') {
+            m_http_max_conn = atoi(max_conn);
+        } else {
+            m_http_max_conn = 2;
+        }
+    }
+    if (ret == CE_None) {
+        const char *timeout = CPLGetXMLValue(config, "Timeout", "");
+        if (timeout[0] != '\0') {
+            m_http_timeout = atoi(timeout);
+        } else {
+            m_http_timeout = 300;
+        }
+    }
+    if (ret == CE_None) {
+        const char *offline_mode = CPLGetXMLValue(config, "OfflineMode", "");
+        if (offline_mode[0] != '\0') {
+            const int offline_mode_bool = StrToBool(offline_mode);
+            if (offline_mode_bool == -1) {
+                CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Invalid value of OfflineMode, true / false expected.");
+                ret = CE_Failure;
+            } else {
+                m_offline_mode = offline_mode_bool;
+            }
+        } else {
+            m_offline_mode = 0;
+        }
+    }
+
+    if (ret == CE_None) {
+        const char *advise_read = CPLGetXMLValue(config, "AdviseRead", "");
+        if (advise_read[0] != '\0') {
+            const int advise_read_bool = StrToBool(advise_read);
+            if (advise_read_bool == -1) {
+                CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Invalid value of AdviseRead, true / false expected.");
+                ret = CE_Failure;
+            } else {
+                m_use_advise_read = advise_read_bool;
+            }
+        } else {
+            m_use_advise_read = 0;
+        }
+    }
+
+    if (ret == CE_None) {
+        const char *verify_advise_read = CPLGetXMLValue(config, "VerifyAdviseRead", "");
+        if (m_use_advise_read) {
+            if (verify_advise_read[0] != '\0') {
+                const int verify_advise_read_bool = StrToBool(verify_advise_read);
+                if (verify_advise_read_bool == -1) {
+                    CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Invalid value of VerifyAdviseRead, true / false expected.");
+                    ret = CE_Failure;
+                } else {
+                    m_verify_advise_read = verify_advise_read_bool;
+                }
+            } else {
+                m_verify_advise_read = 1;
+            }
+        }
+    }
+
+    // Let the local configuration override the minidriver supplied projection
+
+    if (ret == CE_None) {
+        const char *proj = CPLGetXMLValue(config, "Projection", "");
+        if (proj[0] != '\0') {
+            m_projection = ProjToWKT(proj);
+            if (m_projection.size() == 0) {
+                CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Bad projection specified.");
+                ret = CE_Failure;
+            }
+        }
+    }
+
+    // Same for Min, Max and NoData, defined per band or per dataset
+    // If they are set as null strings, they clear the server declared values
+    if (ret == CE_None) {
+       // Data values are attributes; they include NoData, Min, and Max
+       // TODO: document those options
+       if (0!=CPLGetXMLNode(config,"DataValues")) {
+           const char *nodata=CPLGetXMLValue(config,"DataValues.NoData",NULL);
+           if (nodata!=NULL) WMSSetNoDataValue(nodata);
+           const char *min=CPLGetXMLValue(config,"DataValues.min",NULL);
+           if (min!=NULL) WMSSetMinValue(min);
+           const char *max=CPLGetXMLValue(config,"DataValues.max",NULL);
+           if (max!=NULL) WMSSetMaxValue(max);
+       }
+    }
+
+    if (ret == CE_None) {
+        CPLXMLNode *cache_node = CPLGetXMLNode(config, "Cache");
+        if (cache_node != NULL) {
+            m_cache = new GDALWMSCache();
+            if (m_cache->Initialize(cache_node) != CE_None) {
+                delete m_cache;
+                m_cache = NULL;
+                CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Failed to initialize cache.");
+                ret = CE_Failure;
+            }
+        }
+    }
+
+    if (ret == CE_None) {
+    	const int v = StrToBool(CPLGetXMLValue(config, "UnsafeSSL", "false"));
+    	if (v == -1) {
+	    CPLError(CE_Failure, CPLE_AppDefined, "GDALWMS: Invalid value of UnsafeSSL: true or false expected.");
+	    ret = CE_Failure;
+	} else {
+	    m_unsafeSsl = v;
+	}
+    }
+
+    if (ret == CE_None) {
+        /* If no projection has been set yet, ask the mini-driver for one. */
+        if (!m_projection.size()) {
+            const char *proj = m_mini_driver->GetProjectionInWKT();
+            if (proj != NULL) {
+                m_projection = proj;
+            }
+        }
+    }
+
+    return ret;
+}
+
+CPLErr GDALWMSDataset::IRasterIO(GDALRWFlag rw, int x0, int y0, int sx, int sy, void *buffer, int bsx, int bsy, GDALDataType bdt, int band_count, int *band_map, int pixel_space, int line_space, int band_space) {
+    CPLErr ret;
+
+    if (rw != GF_Read) return CE_Failure;
+    if (buffer == NULL) return CE_Failure;
+    if ((sx == 0) || (sy == 0) || (bsx == 0) || (bsy == 0) || (band_count == 0)) return CE_None;
+
+    m_hint.m_x0 = x0;
+    m_hint.m_y0 = y0;
+    m_hint.m_sx = sx;
+    m_hint.m_sy = sy;
+    m_hint.m_overview = -1;
+    m_hint.m_valid = true;
+    //	printf("[%p] GDALWMSDataset::IRasterIO(x0: %d, y0: %d, sx: %d, sy: %d, bsx: %d, bsy: %d, band_count: %d, band_map: %p)\n", this, x0, y0, sx, sy, bsx, bsy, band_count, band_map);
+    ret = GDALDataset::IRasterIO(rw, x0, y0, sx, sy, buffer, bsx, bsy, bdt, band_count, band_map, pixel_space, line_space, band_space);
+    m_hint.m_valid = false;
+
+    return ret;
+}
+
+const char *GDALWMSDataset::GetProjectionRef() {
+    return m_projection.c_str();
+}
+
+CPLErr GDALWMSDataset::SetProjection(const char *proj) {
+    return CE_Failure;
+}
+
+CPLErr GDALWMSDataset::GetGeoTransform(double *gt) {
+    gt[0] = m_data_window.m_x0;
+    gt[1] = (m_data_window.m_x1 - m_data_window.m_x0) / static_cast<double>(m_data_window.m_sx);
+    gt[2] = 0.0;
+    gt[3] = m_data_window.m_y0;
+    gt[4] = 0.0;
+    gt[5] = (m_data_window.m_y1 - m_data_window.m_y0) / static_cast<double>(m_data_window.m_sy);
+    return CE_None;
+}
+
+CPLErr GDALWMSDataset::SetGeoTransform(double *gt) {
+    return CE_Failure;
+}
+
+const GDALWMSDataWindow *GDALWMSDataset::WMSGetDataWindow() const {
+    return &m_data_window;
+}
+
+void GDALWMSDataset::WMSSetBlockSize(int x, int y) {
+    m_block_size_x=x;
+    m_block_size_y=y;
+}
+
+void GDALWMSDataset::WMSSetRasterSize(int x, int y) {
+    nRasterXSize=x;
+    nRasterYSize=y;
+}
+
+void GDALWMSDataset::WMSSetBandsCount(int count) {
+    nBands=count;
+}
+
+void GDALWMSDataset::WMSSetClamp(bool flag=true) {
+    m_clamp_requests=flag;
+}
+
+void GDALWMSDataset::WMSSetDataType(GDALDataType type) {
+    m_data_type=type;
+}
+
+void GDALWMSDataset::WMSSetDataWindow(GDALWMSDataWindow &window) {
+    m_data_window=window;
+}
+
+void GDALWMSDataset::WMSSetDefaultBlockSize(int x, int y) {
+    m_default_block_size_x=x;
+    m_default_block_size_y=y;
+}
+
+void GDALWMSDataset::WMSSetDefaultDataWindowCoordinates(double x0, double y0, double x1, double y1)
+{
+    m_default_data_window.m_x0 = x0;
+    m_default_data_window.m_y0 = y0;
+    m_default_data_window.m_x1 = x1;
+    m_default_data_window.m_y1 = y1;
+}
+
+void GDALWMSDataset::WMSSetDefaultTileCount(int tilecountx, int tilecounty)
+{
+    m_default_tile_count_x = tilecountx;
+    m_default_tile_count_y = tilecounty;
+}
+
+void GDALWMSDataset::WMSSetDefaultTileLevel(int tlevel)
+{
+    m_default_data_window.m_tlevel = tlevel;
+}
+
+void GDALWMSDataset::WMSSetDefaultOverviewCount(int overview_count)
+{
+    m_default_overview_count = overview_count;
+}
+
+void GDALWMSDataset::WMSSetNeedsDataWindow(int flag)
+{
+    m_bNeedsDataWindow = flag;
+}
+
+static void list2vec(std::vector<double> &v,const char *pszList)
+{
+    if ((pszList==NULL)||(pszList[0]==0)) return;
+    char **papszTokens=CSLTokenizeString2(pszList," \t\n\r",
+        CSLT_STRIPLEADSPACES|CSLT_STRIPENDSPACES);
+    v.clear();
+    for (int i=0;i<CSLCount(papszTokens);i++)
+        v.push_back(CPLStrtod(papszTokens[i],NULL));
+    CSLDestroy(papszTokens);
+}
+
+void GDALWMSDataset::WMSSetNoDataValue(const char * pszNoData)
+{
+    list2vec(vNoData,pszNoData);
+}
+
+void GDALWMSDataset::WMSSetMinValue(const char * pszMin)
+{
+    list2vec(vMin,pszMin);
+}
+
+void GDALWMSDataset::WMSSetMaxValue(const char * pszMax)
+{
+    list2vec(vMax,pszMax);
+}
+
+CPLErr GDALWMSDataset::AdviseRead(int x0, int y0, int sx, int sy, int bsx, int bsy, GDALDataType bdt, int band_count, int *band_map, char **options) {
+//    printf("AdviseRead(%d, %d, %d, %d)\n", x0, y0, sx, sy);
+    if (m_offline_mode || !m_use_advise_read) return CE_None;
+    if (m_cache == NULL) return CE_Failure;
+
+    GDALRasterBand *band = GetRasterBand(1);
+    if (band == NULL) return CE_Failure;
+    return band->AdviseRead(x0, y0, sx, sy, bsx, bsy, bdt, options);
+}
+
+const char *GDALWMSDataset::GetMetadataItem( const char * pszName,
+                                             const char * pszDomain )
+{
+    if( pszName != NULL && EQUAL(pszName, "XML") &&
+        pszDomain != NULL && EQUAL(pszDomain, "WMS") )
+    {
+        return (m_osXML.size()) ? m_osXML.c_str() : NULL;
+    }
+
+    return GDALPamDataset::GetMetadataItem(pszName, pszDomain);
+}
Index: /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/ogr/ogrsf_frmts/vfk/vfkfeature.cpp
===================================================================
--- /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/ogr/ogrsf_frmts/vfk/vfkfeature.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/ogr/ogrsf_frmts/vfk/vfkfeature.cpp	(revision 24686)
@@ -0,0 +1,560 @@
+/******************************************************************************
+ * $Id: vfkfeature.cpp 25702 2013-03-07 17:17:54Z martinl $
+ *
+ * Project:  VFK Reader - Feature definition
+ * Purpose:  Implements IVFKFeature/VFKFeature class.
+ * Author:   Martin Landa, landa.martin gmail.com
+ *
+ ******************************************************************************
+ * Copyright (c) 2009-2010, 2012-2013, Martin Landa <landa.martin gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ ****************************************************************************/
+
+#include "vfkreader.h"
+#include "vfkreaderp.h"
+
+#include "cpl_conv.h"
+#include "cpl_error.h"
+
+/*!
+  \brief IVFKFeature constructor
+
+  \param poDataBlock pointer to VFKDataBlock instance
+*/
+IVFKFeature::IVFKFeature(IVFKDataBlock *poDataBlock)
+{
+    CPLAssert(NULL != poDataBlock);
+    m_poDataBlock   = poDataBlock;
+
+    m_nFID          = -1;
+    m_nGeometryType = poDataBlock->GetGeometryType();
+    m_bGeometry     = FALSE;
+    m_bValid        = FALSE;
+    m_paGeom        = NULL;
+}
+
+/*!
+  \brief IVFKFeature destructor
+*/
+IVFKFeature::~IVFKFeature()
+{
+    if (m_paGeom)
+        delete m_paGeom;
+
+    m_poDataBlock = NULL;
+}
+
+/*!
+  \brief Set feature geometry type
+*/
+void IVFKFeature::SetGeometryType(OGRwkbGeometryType nGeomType)
+{
+    m_nGeometryType = nGeomType;
+}
+
+/*!
+  \brief Set feature id
+
+  FID: pass 0 to assign the next sequential id, -1 to keep the current id
+
+  \param nFID feature id
+*/
+void IVFKFeature::SetFID(long nFID)
+{
+    if (m_nFID > 0) {
+        m_nFID = nFID;
+    }
+    else {
+        m_nFID = m_poDataBlock->GetFeatureCount() + 1;
+    }
+}
+
+/*!
+  \brief Set feature geometry
+
+  Also checks if given geometry is valid
+
+  \param poGeom pointer to OGRGeometry
+
+  \return TRUE on valid feature or otherwise FALSE
+*/
+bool IVFKFeature::SetGeometry(OGRGeometry *poGeom)
+{
+    m_bGeometry = TRUE;
+
+    delete m_paGeom;
+    m_paGeom = NULL;
+    m_bValid = TRUE;
+
+    if (!poGeom) {
+	return m_bValid;
+    }
+
+    /* check empty geometries */
+    if (m_nGeometryType == wkbNone && poGeom->IsEmpty()) {
+	CPLDebug("OGR-VFK", "%s: empty geometry fid = %ld",
+		 m_poDataBlock->GetName(), m_nFID);
+        m_bValid = FALSE;
+    }
+
+    /* check coordinates */
+    if (m_nGeometryType == wkbPoint) {
+        double x, y;
+        x = ((OGRPoint *) poGeom)->getX();
+        y = ((OGRPoint *) poGeom)->getY();
+        if (x > -430000 || x < -910000 ||
+            y > -930000 || y < -1230000) {
+            CPLDebug("OGR-VFK", "%s: invalid point fid = %ld",
+                     m_poDataBlock->GetName(), m_nFID);
+            m_bValid = FALSE;
+        }
+    }
+
+    /* check degenerated linestrings */
+    if (m_nGeometryType == wkbLineString &&
+        ((OGRLineString *) poGeom)->getNumPoints() < 2) {
+        CPLDebug("OGR-VFK", "%s: invalid linestring fid = %ld",
+		 m_poDataBlock->GetName(), m_nFID);
+        m_bValid = FALSE;
+    }
+
+    /* check degenerated polygons */
+    if (m_nGeometryType == wkbPolygon) {
+        OGRLinearRing *poRing;
+        poRing = ((OGRPolygon *) poGeom)->getExteriorRing();
+        if (!poRing || poRing->getNumPoints() < 3) {
+	    CPLDebug("OGR-VFK", "%s: invalid polygon fid = %ld",
+		     m_poDataBlock->GetName(), m_nFID);
+            m_bValid = FALSE;
+	}
+    }
+
+    if (m_bValid)
+        m_paGeom = (OGRGeometry *) poGeom->clone(); /* make copy */
+
+    return m_bValid;
+}
+
+/*!
+  \brief Get feature geometry
+
+  \return pointer to OGRGeometry or NULL on error
+*/
+OGRGeometry *IVFKFeature::GetGeometry()
+{
+    if (m_nGeometryType != wkbNone && !m_bGeometry)
+        LoadGeometry();
+
+    return m_paGeom;
+}
+
+
+/*!
+  \brief Load geometry
+
+  \return TRUE on success or FALSE on failure
+*/
+bool IVFKFeature::LoadGeometry()
+{
+    const char *pszName;
+    CPLString osSQL;
+
+    if (m_bGeometry)
+        return TRUE;
+
+    pszName  = m_poDataBlock->GetName();
+
+    if (EQUAL (pszName, "SOBR") ||
+        EQUAL (pszName, "OBBP") ||
+        EQUAL (pszName, "SPOL") ||
+        EQUAL (pszName, "OB") ||
+        EQUAL (pszName, "OP") ||
+        EQUAL (pszName, "OBPEJ")) {
+        /* -> wkbPoint */
+
+        return LoadGeometryPoint();
+    }
+    else if (EQUAL (pszName, "SBP")) {
+        /* -> wkbLineString */
+        return LoadGeometryLineStringSBP();
+    }
+    else if (EQUAL (pszName, "HP") ||
+             EQUAL (pszName, "DPM")) {
+        /* -> wkbLineString */
+        return LoadGeometryLineStringHP();
+    }
+    else if (EQUAL (pszName, "PAR") ||
+             EQUAL (pszName, "BUD")) {
+        /* -> wkbPolygon */
+        return LoadGeometryPolygon();
+    }
+
+    return FALSE;
+}
+
+/*!
+  \brief VFKFeature constructor
+
+  \param poDataBlock pointer to VFKDataBlock instance
+*/
+VFKFeature::VFKFeature(IVFKDataBlock *poDataBlock, long iFID) : IVFKFeature(poDataBlock)
+{
+    m_nFID = iFID;
+    m_propertyList.assign(poDataBlock->GetPropertyCount(), VFKProperty());
+    CPLAssert(size_t (poDataBlock->GetPropertyCount()) == m_propertyList.size());
+}
+
+/*!
+  \brief Set feature properties
+
+  \param pszLine pointer to line containing feature definition
+
+  \return TRUE on success or FALSE on failure
+*/
+bool VFKFeature::SetProperties(const char *pszLine)
+{
+    unsigned int iIndex, nLength;
+    const char *poChar, *poProp;
+    char* pszProp;
+    bool inString;
+
+    std::vector<CPLString> oPropList;
+
+    pszProp = NULL;
+
+    for (poChar = pszLine; *poChar != '\0' && *poChar != ';'; poChar++)
+        /* skip data block name */
+        ;
+    if( *poChar == '\0' )
+        return FALSE; /* nothing to read */
+
+    poChar++; /* skip ';' after data block name */
+
+    /* read properties into the list */
+    poProp = poChar;
+    iIndex = nLength = 0;
+    inString = FALSE;
+    while(*poChar != '\0') {
+        if (*poChar == '"' &&
+            (*(poChar-1) == ';' || *(poChar+1) == ';' || *(poChar+1) == '\0')) {
+            poChar++; /* skip '"' */
+            inString = inString ? FALSE : TRUE;
+            if (inString) {
+                poProp = poChar;
+                if (*poChar == '"') {
+                    poChar++;
+                    inString = FALSE;
+                }
+            }
+            if (*poChar == '\0')
+                break;
+        }
+        if (*poChar == ';' && !inString) {
+            pszProp = (char *) CPLRealloc(pszProp, nLength + 1);
+            if (nLength > 0)
+                strncpy(pszProp, poProp, nLength);
+            pszProp[nLength] = '\0';
+            oPropList.push_back(pszProp);
+            iIndex++;
+            poProp = ++poChar;
+            nLength = 0;
+        }
+        else {
+            poChar++;
+            nLength++;
+        }
+    }
+    /* append last property */
+    if (inString) {
+        nLength--; /* ignore '"' */
+    }
+    pszProp = (char *) CPLRealloc(pszProp, nLength + 1);
+    if (nLength > 0)
+        strncpy(pszProp, poProp, nLength);
+    pszProp[nLength] = '\0';
+    oPropList.push_back(pszProp);
+
+    /* set properties from the list */
+    if (oPropList.size() != (size_t) m_poDataBlock->GetPropertyCount()) {
+        /* try to read also invalid records */
+        CPLDebug("OGR-VFK", "%s: invalid number of properties %d should be %d",
+                 m_poDataBlock->GetName(),
+		 (int) oPropList.size(), m_poDataBlock->GetPropertyCount());
+        return FALSE;
+   }
+    iIndex = 0;
+    for (std::vector<CPLString>::iterator ip = oPropList.begin();
+	 ip != oPropList.end(); ++ip) {
+	SetProperty(iIndex++, (*ip).c_str());
+    }
+
+    /* set fid
+    if (EQUAL(m_poDataBlock->GetName(), "SBP")) {
+        GUIntBig id;
+        const VFKProperty *poVfkProperty;
+
+        poVfkProperty = GetProperty("PORADOVE_CISLO_BODU");
+        if (poVfkProperty)
+        {
+            id = strtoul(poVfkProperty->GetValueS(), NULL, 0);
+            if (id == 1)
+                SetFID(0);
+            else
+                SetFID(-1);
+        }
+    }
+    else {
+        SetFID(0);
+    }
+    */
+    CPLFree(pszProp);
+
+    return TRUE;
+}
+
+/*!
+  \brief Set feature property
+
+  \param iIndex property index
+  \param pszValue property value
+
+  \return TRUE on success
+  \return FALSE on failure
+*/
+bool VFKFeature::SetProperty(int iIndex, const char *pszValue)
+{
+    if (iIndex < 0 || iIndex >= m_poDataBlock->GetPropertyCount() ||
+	size_t(iIndex) >= m_propertyList.size())
+        return FALSE;
+
+    if (strlen(pszValue) < 1)
+        m_propertyList[iIndex] = VFKProperty();
+    else {
+        OGRFieldType fType;
+
+        const char *pszEncoding;
+        char       *pszValueEnc;
+
+        fType = m_poDataBlock->GetProperty(iIndex)->GetType();
+        switch (fType) {
+        case OFTInteger:
+            m_propertyList[iIndex] = VFKProperty(atoi(pszValue));
+            break;
+        case OFTReal:
+            m_propertyList[iIndex] = VFKProperty(CPLAtof(pszValue));
+            break;
+        default:
+            pszEncoding = m_poDataBlock->GetProperty(iIndex)->GetEncoding();
+            if (pszEncoding) {
+                pszValueEnc = CPLRecode(pszValue, pszEncoding,
+                                        CPL_ENC_UTF8);
+                m_propertyList[iIndex] = VFKProperty(pszValueEnc);
+                CPLFree(pszValueEnc);
+            }
+            else {
+                m_propertyList[iIndex] = VFKProperty(pszValue);
+            }
+            break;
+        }
+    }
+    return TRUE;
+}
+
+/*!
+  \brief Get property value by index
+
+  \param iIndex property index
+
+  \return property value
+  \return NULL on error
+*/
+const VFKProperty *VFKFeature::GetProperty(int iIndex) const
+{
+    if (iIndex < 0 || iIndex >= m_poDataBlock->GetPropertyCount() ||
+	size_t(iIndex) >= m_propertyList.size())
+        return NULL;
+
+    const VFKProperty* poProperty = &m_propertyList[iIndex];
+    return poProperty;
+}
+
+/*!
+  \brief Get property value by name
+
+  \param pszName property name
+
+  \return property value
+  \return NULL on error
+*/
+const VFKProperty *VFKFeature::GetProperty(const char *pszName) const
+{
+    return GetProperty(m_poDataBlock->GetPropertyIndex(pszName));
+}
+
+/*!
+  \brief Load geometry (point layers)
+
+  \todo Really needed?
+
+  \return TRUE on success
+  \return FALSE on failure
+*/
+bool VFKFeature::LoadGeometryPoint()
+{
+    double x, y;
+    int i_idxX, i_idxY;
+
+    i_idxY = m_poDataBlock->GetPropertyIndex("SOURADNICE_Y");
+    i_idxX = m_poDataBlock->GetPropertyIndex("SOURADNICE_X");
+    if (i_idxY < 0 || i_idxX < 0)
+        return FALSE;
+
+    x = -1.0 * GetProperty(i_idxY)->GetValueD();
+    y = -1.0 * GetProperty(i_idxX)->GetValueD();
+    OGRPoint pt(x, y);
+    SetGeometry(&pt);
+
+    return TRUE;
+}
+
+/*!
+  \brief Load geometry (linestring SBP layer)
+
+  \todo Really needed?
+
+  \return TRUE on success or FALSE on failure
+*/
+bool VFKFeature::LoadGeometryLineStringSBP()
+{
+    int id, idxId, idxBp_Id, idxPCB, ipcb;
+
+    VFKDataBlock *poDataBlockPoints;
+    VFKFeature   *poPoint, *poLine;
+
+    OGRLineString OGRLine;
+
+    poDataBlockPoints = (VFKDataBlock *) m_poDataBlock->GetReader()->GetDataBlock("SOBR");
+    if (!poDataBlockPoints)
+        return FALSE;
+
+    idxId    = poDataBlockPoints->GetPropertyIndex("ID");
+    idxBp_Id = m_poDataBlock->GetPropertyIndex("BP_ID");
+    idxPCB   = m_poDataBlock->GetPropertyIndex("PORADOVE_CISLO_BODU");
+    if (idxId < 0 || idxBp_Id < 0 || idxPCB < 0)
+        return false;
+
+    poLine = this;
+    while (TRUE)
+    {
+        id   = poLine->GetProperty(idxBp_Id)->GetValueI();
+        ipcb = poLine->GetProperty(idxPCB)->GetValueI();
+        if (OGRLine.getNumPoints() > 0 && ipcb == 1)
+        {
+            m_poDataBlock->GetPreviousFeature(); /* push back */
+            break;
+        }
+
+        poPoint = poDataBlockPoints->GetFeature(idxId, id);
+        if (!poPoint)
+        {
+            continue;
+        }
+        OGRPoint *pt = (OGRPoint *) poPoint->GetGeometry();
+        OGRLine.addPoint(pt);
+
+        poLine = (VFKFeature *) m_poDataBlock->GetNextFeature();
+        if (!poLine)
+            break;
+    };
+
+    OGRLine.setCoordinateDimension(2); /* force 2D */
+    SetGeometry(&OGRLine);
+
+    /* reset reading */
+    poDataBlockPoints->ResetReading();
+
+    return TRUE;
+}
+
+/*!
+  \brief Load geometry (linestring HP/DPM layer)
+
+  \todo Really needed?
+
+  \return TRUE on success or FALSE on failure
+*/
+bool VFKFeature::LoadGeometryLineStringHP()
+{
+    int           id, idxId, idxHp_Id;
+    VFKDataBlock *poDataBlockLines;
+    VFKFeature   *poLine;
+
+    poDataBlockLines = (VFKDataBlock *) m_poDataBlock->GetReader()->GetDataBlock("SBP");
+    if (!poDataBlockLines)
+        return FALSE;
+
+    idxId    = m_poDataBlock->GetPropertyIndex("ID");
+    idxHp_Id = poDataBlockLines->GetPropertyIndex("HP_ID");
+    if (idxId < 0 || idxHp_Id < 0)
+        return FALSE;
+
+    id = GetProperty(idxId)->GetValueI();
+    poLine = poDataBlockLines->GetFeature(idxHp_Id, id);
+    if (!poLine || !poLine->GetGeometry())
+        return FALSE;
+
+    SetGeometry(poLine->GetGeometry());
+    poDataBlockLines->ResetReading();
+
+    return TRUE;
+}
+
+/*!
+  \brief Load geometry (polygon BUD/PAR layers)
+
+  \todo Implement (really needed?)
+
+  \return TRUE on success or FALSE on failure
+*/
+bool VFKFeature::LoadGeometryPolygon()
+{
+    return FALSE;
+}
+OGRErr VFKFeature::LoadProperties(OGRFeature *poFeature)
+{
+    for (int iField = 0; iField < m_poDataBlock->GetPropertyCount(); iField++) {
+        if (GetProperty(iField)->IsNull())
+            continue;
+        OGRFieldType fType = poFeature->GetDefnRef()->GetFieldDefn(iField)->GetType();
+        if (fType == OFTInteger)
+            poFeature->SetField(iField,
+                                GetProperty(iField)->GetValueI());
+        else if (fType == OFTReal)
+            poFeature->SetField(iField,
+                                GetProperty(iField)->GetValueD());
+        else
+            poFeature->SetField(iField,
+                                GetProperty(iField)->GetValueS());
+    }
+
+    return OGRERR_NONE;
+}
Index: /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/port/cplkeywordparser.cpp
===================================================================
--- /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/port/cplkeywordparser.cpp	(revision 24686)
+++ /issm/trunk/externalpackages/gdal/configs/1.10/linux/debian/port/cplkeywordparser.cpp	(revision 24686)
@@ -0,0 +1,368 @@
+/******************************************************************************
+ * $Id: cplkeywordparser.cpp 20996 2010-10-28 18:38:15Z rouault $
+ *
+ * Project:  Common Portability Library
+ * Purpose:  Implementation of CPLKeywordParser - a class for parsing
+ *           the keyword format used for files like QuickBird .RPB files.
+ *           This is a slight variation on the NASAKeywordParser used for
+ *           the PDS/ISIS2/ISIS3 formats.
+ * Author:   Frank Warmerdam <warmerdam@pobox.com>
+ *
+ ******************************************************************************
+ * Copyright (c) 2008, Frank Warmerdam <warmerdam@pobox.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ****************************************************************************/
+
+#include "cpl_string.h"
+#include "cplkeywordparser.h"
+
+CPL_CVSID("$Id");
+
+/************************************************************************/
+/* ==================================================================== */
+/*                          CPLKeywordParser                           */
+/* ==================================================================== */
+/************************************************************************/
+
+/************************************************************************/
+/*                         CPLKeywordParser()                          */
+/************************************************************************/
+
+CPLKeywordParser::CPLKeywordParser()
+
+{
+    papszKeywordList = NULL;
+}
+
+/************************************************************************/
+/*                        ~CPLKeywordParser()                          */
+/************************************************************************/
+
+CPLKeywordParser::~CPLKeywordParser()
+
+{
+    CSLDestroy( papszKeywordList );
+    papszKeywordList = NULL;
+}
+
+/************************************************************************/
+/*                               Ingest()                               */
+/************************************************************************/
+
+int CPLKeywordParser::Ingest( VSILFILE *fp )
+
+{
+/* -------------------------------------------------------------------- */
+/*      Read in buffer till we find END all on its own line.            */
+/* -------------------------------------------------------------------- */
+    for( ; TRUE; )
+    {
+        const char *pszCheck;
+        char szChunk[513];
+
+        int nBytesRead = VSIFReadL( szChunk, 1, 512, fp );
+
+        szChunk[nBytesRead] = '\0';
+        osHeaderText += szChunk;
+
+        if( nBytesRead < 512 )
+            break;
+
+        if( osHeaderText.size() > 520 )
+            pszCheck = osHeaderText.c_str() + (osHeaderText.size() - 520);
+        else
+            pszCheck = szChunk;
+
+        if( strstr(pszCheck,"\r\nEND;\r\n") != NULL
+            || strstr(pszCheck,"\nEND;\n") != NULL )
+            break;
+    }
+
+    pszHeaderNext = osHeaderText.c_str();
+
+/* -------------------------------------------------------------------- */
+/*      Process name/value pairs, keeping track of a "path stack".      */
+/* -------------------------------------------------------------------- */
+    return ReadGroup( "" );
+}
+
+/************************************************************************/
+/*                             ReadGroup()                              */
+/************************************************************************/
+
+int CPLKeywordParser::ReadGroup( const char *pszPathPrefix )
+
+{
+    CPLString osName, osValue;
+
+    for( ; TRUE; )
+    {
+        if( !ReadPair( osName, osValue ) )
+            return FALSE;
+
+        if( EQUAL(osName,"BEGIN_GROUP") )
+        {
+            if( !ReadGroup( (CPLString(pszPathPrefix) + osValue + ".").c_str() ) )
+                return FALSE;
+        }
+        else if( EQUALN(osName,"END",3) )
+        {
+            return TRUE;
+        }
+        else
+        {
+            osName = pszPathPrefix + osName;
+            papszKeywordList = CSLSetNameValue( papszKeywordList,
+                                                osName, osValue );
+        }
+    }
+}
+
+/************************************************************************/
+/*                              ReadPair()                              */
+/*                                                                      */
+/*      Read a name/value pair from the input stream.  Strip off        */
+/*      white space, ignore comments, split on '='.                     */
+/************************************************************************/
+
+int CPLKeywordParser::ReadPair( CPLString &osName, CPLString &osValue )
+
+{
+    osName = "";
+    osValue = "";
+
+    if( !ReadWord( osName ) )
+        return FALSE;
+
+    SkipWhite();
+
+    if( EQUAL(osName,"END") )
+        return TRUE;
+
+    if( *pszHeaderNext != '=' )
+    {
+        // ISIS3 does not have anything after the end group/object keyword.
+        if( EQUAL(osName,"End_Group") || EQUAL(osName,"End_Object") )
+            return TRUE;
+        else
+            return FALSE;
+    }
+
+    pszHeaderNext++;
+
+    SkipWhite();
+
+    osValue = "";
+
+    // Handle value lists like:     Name   = (Red, Red)
+    // or list of lists like : TLCList = ( (0,  0.000000), (8299,  4.811014) );
+    if( *pszHeaderNext == '(' )
+    {
+        CPLString osWord;
+        int nDepth = 0;
+        const char* pszLastPos = pszHeaderNext;
+
+        while( ReadWord( osWord ) && pszLastPos != pszHeaderNext)
+        {
+            SkipWhite();
+            pszLastPos = pszHeaderNext;
+
+            osValue += osWord;
+            const char* pszIter = osWord.c_str();
+            int bInQuote = FALSE;
+            while(*pszIter != '\0')
+            {
+                if (*pszIter == '"')
+                    bInQuote = !bInQuote;
+                else if (!bInQuote)
+                {
+                    if (*pszIter == '(')
+                        nDepth ++;
+                    else if (*pszIter == ')')
+                    {
+                        nDepth --;
+                        if (nDepth == 0)
+                            break;
+                    }
+                }
+                pszIter ++;
+            }
+            if (*pszIter == ')' && nDepth == 0)
+                break;
+        }
+    }
+
+    else // Handle more normal "single word" values.
+    {
+        if( !ReadWord( osValue ) )
+            return FALSE;
+
+    }
+
+    SkipWhite();
+
+    // No units keyword?
+    if( *pszHeaderNext != '<' )
+        return TRUE;
+
+    // Append units keyword.  For lines that look like this:
+    //  MAP_RESOLUTION               = 4.0 <PIXEL/DEGREE>
+
+    CPLString osWord;
+
+    osValue += " ";
+
+    while( ReadWord( osWord ) )
+    {
+        SkipWhite();
+
+        osValue += osWord;
+        if( osWord[strlen(osWord)-1] == '>' )
+            break;
+    }
+
+    return TRUE;
+}
+
+/************************************************************************/
+/*                              ReadWord()                              */
+/************************************************************************/
+
+int CPLKeywordParser::ReadWord( CPLString &osWord )
+
+{
+    osWord = "";
+
+    SkipWhite();
+
+    if( *pszHeaderNext == '\0' || *pszHeaderNext == '=' )
+        return FALSE;
+
+    while( *pszHeaderNext != '\0'
+           && *pszHeaderNext != '='
+           && *pszHeaderNext != ';'
+           && !isspace((unsigned char)*pszHeaderNext) )
+    {
+        if( *pszHeaderNext == '"' )
+        {
+            osWord += *(pszHeaderNext++);
+            while( *pszHeaderNext != '"' )
+            {
+                if( *pszHeaderNext == '\0' )
+                    return FALSE;
+
+                osWord += *(pszHeaderNext++);
+            }
+            osWord += *(pszHeaderNext++);
+        }
+        else if( *pszHeaderNext == '\'' )
+        {
+            osWord += *(pszHeaderNext++);
+            while( *pszHeaderNext != '\'' )
+            {
+                if( *pszHeaderNext == '\0' )
+                    return FALSE;
+
+                osWord += *(pszHeaderNext++);
+            }
+            osWord += *(pszHeaderNext++);
+        }
+        else
+        {
+            osWord += *pszHeaderNext;
+            pszHeaderNext++;
+        }
+    }
+
+    if( *pszHeaderNext == ';' )
+        pszHeaderNext++;
+
+    return TRUE;
+}
+
+/************************************************************************/
+/*                             SkipWhite()                              */
+/************************************************************************/
+
+void CPLKeywordParser::SkipWhite()
+
+{
+    for( ; TRUE; )
+    {
+        // Skip white space (newline, space, tab, etc )
+        if( isspace( (unsigned char)*pszHeaderNext ) )
+        {
+            pszHeaderNext++;
+            continue;
+        }
+
+        // Skip C style comments
+        if( *pszHeaderNext == '/' && pszHeaderNext[1] == '*' )
+        {
+            pszHeaderNext += 2;
+
+            while( *pszHeaderNext != '\0'
+                   && (*pszHeaderNext != '*'
+                       || pszHeaderNext[1] != '/' ) )
+            {
+                pszHeaderNext++;
+            }
+
+            pszHeaderNext += 2;
+            continue;
+        }
+
+        // Skip # style comments
+        if( *pszHeaderNext == '#'  )
+        {
+            pszHeaderNext += 1;
+
+            // consume till end of line.
+            while( *pszHeaderNext != '\0'
+                   && *pszHeaderNext != 10
+                   && *pszHeaderNext != 13 )
+            {
+                pszHeaderNext++;
+            }
+            continue;
+        }
+
+        // not white space, return.
+        return;
+    }
+}
+
+/************************************************************************/
+/*                             GetKeyword()                             */
+/************************************************************************/
+
+const char *CPLKeywordParser::GetKeyword( const char *pszPath,
+                                            const char *pszDefault )
+
+{
+    const char *pszResult;
+
+    pszResult = CSLFetchNameValue( papszKeywordList, pszPath );
+    if( pszResult == NULL )
+        return pszDefault;
+    else
+        return pszResult;
+}
+
Index: /issm/trunk/externalpackages/gdal/install-1.10-debian-netcdf.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-1.10-debian-netcdf.sh	(revision 24686)
+++ /issm/trunk/externalpackages/gdal/install-1.10-debian-netcdf.sh	(revision 24686)
@@ -0,0 +1,49 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="1.10.0"
+HDF5_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
+NETCDF_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
+PROJ_ROOT="${ISSM_DIR}/externalpackages/proj/install"
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/gdal-${VER}.tar.gz" "gdal-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf gdal-${VER}.tar.gz
+
+# Move source into 'src' directory
+mv gdal-${VER}/* src
+rm -rf gdal-${VER}
+
+# Copy customized source files to 'src' directory
+cp configs/1.10/linux/debian/frmts/wms/dataset.cpp src/frmts/wms
+cp configs/1.10/linux/debian/ogr/ogrsf_frmts/vfk/vfkfeature.cpp src/ogr/ogrsf_frmts/vfk
+cp configs/1.10/linux/debian/port/cplkeywordparser.cpp src/port
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/gdal/install" \
+	--with-hdf5="${HDF5_ROOT}" \
+	--with-netcdf="${NETCDF_ROOT}" \
+	--with-proj="${PROJ_ROOT}"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/gdal/install-3.0-netcdf.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-3.0-netcdf.sh	(revision 24686)
+++ /issm/trunk/externalpackages/gdal/install-3.0-netcdf.sh	(revision 24686)
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="3.0.2"
+HDF5_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
+NETCDF_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
+PROJ_ROOT="${ISSM_DIR}/externalpackages/proj/install"
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "http://issm.jpl.nasa.gov/files/externalpackages/gdal-${VER}.tar.gz" "gdal-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf gdal-$VER.tar.gz
+
+# Move source into 'src' directory
+mv gdal-$VER/* src
+rm -rf gdal-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/gdal/install" \
+	--with-hdf5="${HDF5_ROOT}" \
+	--with-netcdf="${NETCDF_ROOT}" \
+	--with-proj="${PROJ_ROOT}"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/gdal/install-3.0-python-netcdf.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-3.0-python-netcdf.sh	(revision 24686)
+++ /issm/trunk/externalpackages/gdal/install-3.0-python-netcdf.sh	(revision 24686)
@@ -0,0 +1,49 @@
+#!/bin/bash
+set -eu
+
+
+## TODO
+#	- May want to supply path to Python instead of, effectively, using result of `which python`
+#
+
+## Constants
+#
+VER="3.0.2"
+HDF5_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
+NETCDF_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
+PROJ_ROOT="${ISSM_DIR}/externalpackages/proj/install"
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "http://issm.jpl.nasa.gov/files/externalpackages/gdal-${VER}.tar.gz" "gdal-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf gdal-$VER.tar.gz
+
+# Move source into 'src' directory
+mv gdal-$VER/* src
+rm -rf gdal-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/gdal/install" \
+	--with-python \
+	--with-hdf5="${HDF5_ROOT}" \
+	--with-netcdf="${NETCDF_ROOT}" \
+	--with-proj="${PROJ_ROOT}"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/gdal/install-3.0-python.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-3.0-python.sh	(revision 24686)
+++ /issm/trunk/externalpackages/gdal/install-3.0-python.sh	(revision 24686)
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -eu
+
+
+## TODO
+#	- May want to supply path to Python instead of, effectively, using result of `which python`
+#
+
+## Constants
+#
+VER="3.0.2"
+PROJ_ROOT="${ISSM_DIR}/externalpackages/proj/install"
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "http://issm.jpl.nasa.gov/files/externalpackages/gdal-${VER}.tar.gz" "gdal-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf gdal-$VER.tar.gz
+
+# Move source into 'src' directory
+mv gdal-$VER/* src
+rm -rf gdal-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/gdal/install" \
+	--with-python \
+	--without-hdf5 \
+	--without-netcdf \
+	--with-proj="${PROJ_ROOT}"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/gdal/install-3.0.sh
===================================================================
--- /issm/trunk/externalpackages/gdal/install-3.0.sh	(revision 24686)
+++ /issm/trunk/externalpackages/gdal/install-3.0.sh	(revision 24686)
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="3.0.2"
+PROJ_ROOT="${ISSM_DIR}/externalpackages/proj/install"
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "http://issm.jpl.nasa.gov/files/externalpackages/gdal-${VER}.tar.gz" "gdal-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf gdal-${VER}.tar.gz
+
+# Move source into 'src' directory
+mv gdal-$VER/* src
+rm -rf gdal-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/gdal/install" \
+	--without-hdf5 \
+	--without-netcdf \
+	--with-proj="${PROJ_ROOT}"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/gmsh/install.sh
===================================================================
--- /issm/trunk/externalpackages/gmsh/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/gmsh/install.sh	(revision 24686)
@@ -2,4 +2,7 @@
 set -eu
 
+
+## Constants
+#
 VER="3.0.5"
 
@@ -12,23 +15,22 @@
 
 # Untar source
-tar -xvzf gmsh-$VER-source.tgz
+tar -xvzf gmsh-${VER}-source.tgz
 
-# Move source to src directory
-mv gmsh-$VER-source/* src
-rm -rf gmsh-$VER-source
+# Move source to 'src' directory
+mv gmsh-${VER}-source/* src
+rm -rf gmsh-${VER}-source
 
 # Configure
 cd install
 cmake ../src \
-	-DCMAKE_INSTALL_PREFIX="$ISSM_DIR/externalpackages/gmsh/install" \
+	-DCMAKE_INSTALL_PREFIX="${ISSM_DIR}/externalpackages/gmsh/install" \
 	-DENABLE_MPI=1
 
-# Compile
+# Compile and install
 if [ $# -eq 0 ]; then
 	make
+	make install
 else
 	make -j $1
+	make -j $1 install
 fi
-
-# Install
-make install
Index: /issm/trunk/externalpackages/gmt/configs/5.1/linux/cmake/ConfigUser.cmake
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/5.1/linux/cmake/ConfigUser.cmake	(revision 24686)
+++ /issm/trunk/externalpackages/gmt/configs/5.1/linux/cmake/ConfigUser.cmake	(revision 24686)
@@ -0,0 +1,240 @@
+#
+# $Id: ConfigUserTemplate.cmake 12904 2014-02-17 20:52:35Z fwobbe $
+#
+# Copyright (c) 1991-2014 by P. Wessel, W. H. F. Smith, R. Scharroo, J. Luis and F. Wobbe
+# See LICENSE.TXT file for copying and redistribution conditions.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation; version 3 or any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
+# for more details.
+#
+# Contact info: gmt.soest.hawaii.edu
+# ----------------------------------------------------------------------------
+
+# Use this file to override variables in 'ConfigDefault.cmake' on a per-user
+# basis.  First copy 'ConfigUserTemplate.cmake' to 'ConfigUser.cmake', then
+# edit 'ConfigUser.cmake'.  'ConfigUser.cmake' is not version controlled
+# (currently listed in svn:ignore property)
+#
+# Note: CMake considers an empty string, "FALSE", "OFF", "NO", or any string
+# ending in "-NOTFOUND" to be false (this happens to be case-insensitive, so
+# "False", "off", "no", and "something-NotFound" are all false).  Other values
+# are true.  Thus it does not matter whether you use TRUE and FALSE, ON and
+# OFF, or YES and NO for your booleans.
+
+##
+## Section 1: Installation paths
+##
+
+# ============================================================================
+# Basic setup begins here.  All settings are optional.  In most cases, setting
+# CMAKE_INSTALL_PREFIX should be all you need to do in order to build GMT with
+# reasonable defaults enabled.
+# ============================================================================
+
+# Installation path (usually defaults to /usr/local) [auto]:
+set (CMAKE_INSTALL_PREFIX "$ENV{ISSM_DIR}/externalpackages/gmt/install")
+
+# Set install name suffix used for directories and gmt executables
+# [undefined]:
+#set (GMT_INSTALL_NAME_SUFFIX "suffix")
+
+# Install into traditional directory structure. Disable to install a
+# distribution type directory structure (doc and share separated) [on]:
+#set (GMT_INSTALL_TRADITIONAL_FOLDERNAMES OFF)
+
+# Install convenience links for GMT modules. Disable to install only the main
+# gmt program and access modules as "gmt modulename options" [TRUE]:
+#set (GMT_INSTALL_MODULE_LINKS FALSE)
+
+# Make executables relocatable on supported platforms (relative RPATH) [TRUE]:
+#set (GMT_INSTALL_RELOCATABLE FALSE)
+
+# ============================================================================
+# Advanced configuration begins here.  Usually it is not necessary to edit any
+# settings below.  You should know what you are doing if you do though.  Note:
+# installation paths are relative to ${CMAKE_INSTALL_PREFIX} unless absolute
+# path is given.
+# ============================================================================
+
+# Set binary installation path [bin]:
+#set (GMT_BINDIR "bin")
+
+# Set library installation path [lib or lib64]:
+#set (GMT_LIBDIR "lib")
+
+# Set include installation path [include/gmt${GMT_INSTALL_NAME_SUFFIX}]:
+#set (GMT_INCLUDEDIR "include/gmt")
+
+# Set share installation path [share or share/gmt${GMT_INSTALL_NAME_SUFFIX}]:
+#set (GMT_DATADIR "share/gmt")
+
+# Set doc installation path [share/doc or
+# share/doc/gmt${GMT_INSTALL_NAME_SUFFIX}]:
+#set (GMT_DOCDIR "share/doc/gmt")
+
+# Set manpage installation path [share/man or
+# share/doc/gmt${GMT_INSTALL_NAME_SUFFIX}/man]:
+#set (GMT_MANDIR "share/doc/gmt/man")
+
+# Install documentation files from this external location instead of creating
+# new PDF and HTML documents from scratch [${GMT_SOURCE_DIR}/doc_release]:
+#set (GMT_INSTALL_EXTERNAL_DOC OFF)
+
+# Install manual pages from this external location instead of creating the
+# manpages from scratch [${GMT_SOURCE_DIR}/man_release]:
+#set (GMT_INSTALL_EXTERNAL_MAN OFF)
+
+##
+## Section 2: Build dependencies (should only be needed if CMake cannot
+## automatically detect the right version or path.)
+##
+
+# Set path to GSHHG Shoreline Database [auto]:
+set (GSHHG_ROOT "$ENV{ISSM_DIR}/externalpackages/gshhg/install")
+
+# Copy GSHHG files to $/coast [FALSE]:
+#set (COPY_GSHHG TRUE)
+
+# Set path to DCW Digital Chart of the World for GMT [auto]:
+#set (DCW_ROOT "dcw-gmt_path")
+
+# Copy DCW files to $/dcw [FALSE]:
+#set (COPY_DCW TRUE)
+
+# Set location of NetCDF (can be root directory, path to header file or path
+# to nc-config) [auto]:
+set (NETCDF_ROOT "$ENV{ISSM_DIR}/externalpackages/netcdf/install")
+
+# Set location of GDAL (can be root directory, path to header file or path to
+# gdal-config) [auto]:
+set (GDAL_ROOT "$ENV{ISSM_DIR}/externalpackages/gdal/install")
+
+# Set location of PCRE (can be root directory, path to header file or path to
+# pcre-config) [auto]:
+#set (PCRE_ROOT "pcre_install_prefix")
+
+# Set location of single precision FFTW (can be root directory or path to
+# header file) [auto]:
+#set (FFTW3_ROOT "fftw_install_prefix")
+
+# Set location of ZLIB (can be root directory or path to header file) [auto]:
+set (ZLIB_ROOT "$ENV{ISSM_DIR}/externalpackages/zlib/install")
+
+##
+## Section 3: GMT features
+##
+
+# Enforce GPL or LGPL conformity. Use this to disable routines that cannot be
+# redistributed under the terms of the GPL or LGPL such as Shewchuk's
+# triangulation (valid values are GPL, LGPL and off) [off]:
+#set (LICENSE_RESTRICTED GPL)
+
+# Configure default units (possible values are SI and US) [SI]:
+#set (UNITS "US")
+
+# Enable building of shared libraries [TRUE] (disable to use static libraries;
+# not recommended):
+#set (BUILD_SHARED_LIBS FALSE)
+
+# Build GMT shared lib with supplemental modules [TRUE]:
+#set (BUILD_SUPPLEMENTS FALSE)
+
+##
+## Section 4: Advanced tweaking
+##
+
+#
+# Testing and development
+#
+
+# Enable running examples/tests with "ctest" or "make check" (out-of-source).
+# Need to set either DO_EXAMPLES, DO_TESTS or both and uncomment the following
+# line.
+#enable_testing()
+#set (DO_EXAMPLES TRUE)
+#set (DO_TESTS TRUE)
+# Number of parallel test jobs with "make check":
+#set (N_TEST_JOBS 4)
+
+# Enable this option to run GMT programs from within ${GMT_BINARY_DIR} without
+# installing or setting GMT_SHAREDIR and GMT_USERDIR first. This is required
+# for testing [OFF]:
+#set (SUPPORT_EXEC_IN_BINARY_DIR ON)
+
+# List extra sub-dirs of 'src' with a CMakeList.txt to build non-module codes
+# that link against the full gmt libs (not just the API; for building codes
+# that only need the GMT API, see the gmtextension project).
+#set (EXTRA_BUILD_DIRS apidemo)
+
+# Directory in which to install the release sources per default
+# [${GMT_BINARY_DIR}/gmt-${GMT_PACKAGE_VERSION}]:
+#set (GMT_RELEASE_PREFIX "release-src-prefix")
+
+# If set to false, image conversion from PS images to PNG and PDF does
+# not depend on the gmt binary target. Note: "make gmt" is then required
+# before docs_depends [TRUE].
+#set (GMT_DOCS_DEPEND_ON_GMT FALSE)
+
+#
+# Debugging
+#
+
+# Set build type can be: empty, Debug, Release, RelWithDebInfo or MinSizeRel
+# [Release]:
+#set (CMAKE_BUILD_TYPE Debug)
+
+# Extra debugging for developers:
+#add_definitions(-DDEBUG)
+#add_definitions(-DMEMDEBUG) # Turn on memory tracking see gmt_support.c for extra info
+#set (CMAKE_C_FLAGS "-Wall -Wdeclaration-after-statement") # recommended even for release build
+#set (CMAKE_C_FLAGS "-Wextra ${CMAKE_C_FLAGS}")            # extra warnings
+#set (CMAKE_C_FLAGS_DEBUG -ggdb3)                          # gdb debugging symbols
+#set (CMAKE_C_FLAGS_RELEASE "-ggdb3 -O2 -Wuninitialized")  # check uninitialized variables
+#set (CMAKE_LINK_DEPENDS_DEBUG_MODE TRUE)                  # debug link dependencies
+
+#
+# System specific tweaks
+#
+
+# This is for GCC on Solaris to avoid "relocations remain against allocatable
+# but non-writable sections" problems:
+#set (USER_GMTLIB_LINK_FLAGS -mimpure-text)
+
+# This may be needed to enable strdup and extended math functions with GCC and
+# Suncc on Solaris:
+#set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__EXTENSIONS__")
+
+# Do not warn when building with Windows SDK or Visual Studio Express:
+#set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS ON)
+
+# Manually select runtime library when compiling with Windows SDK or Visual
+# Studio Express:
+#set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS c:/Windows/System32/msvcr100.dll)
+
+# If your NetCDF library is static (not recommended, applies to Windows only)
+#set (NETCDF_STATIC TRUE)
+
+# If want to rename the DLLs to something else than the default (e.g. to
+# append the bitness - Windows only)
+#if (WIN32)
+# set (BITAGE 32)
+# # Detect if we are building a 32 or 64 bits version
+# if (CMAKE_SIZEOF_VOID_P EQUAL 8)
+#   set (BITAGE 64)
+# endif ()
+# set (GMT_DLL_RENAME gmt_w${BITAGE})
+# set (PSL_DLL_RENAME psl_w${BITAGE})
+#endif(WIN32)
+
+# On Windows Visual C 2012 needs _ALLOW_KEYWORD_MACROS to build
+#if(MSVC11)
+#  add_definitions(/D_ALLOW_KEYWORD_MACROS)
+#endif(MSVC11)
+
+# vim: textwidth=78 noexpandtab tabstop=2 softtabstop=2 shiftwidth=2
Index: /issm/trunk/externalpackages/gmt/configs/6.0/linux/cmake/ConfigUser.cmake
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/6.0/linux/cmake/ConfigUser.cmake	(revision 24686)
+++ /issm/trunk/externalpackages/gmt/configs/6.0/linux/cmake/ConfigUser.cmake	(revision 24686)
@@ -0,0 +1,308 @@
+#
+#
+# Copyright (c) 1991-2019 by the GMT Team (https://www.generic-mapping-tools.org/team.html)
+# See LICENSE.TXT file for copying and redistribution conditions.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation; version 3 or any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
+# for more details.
+#
+# Contact info: www.generic-mapping-tools.org
+# ----------------------------------------------------------------------------
+
+# Use this file to override variables in 'ConfigDefault.cmake' on a per-user
+# basis.  First copy 'ConfigUserTemplate.cmake' to 'ConfigUser.cmake', then
+# edit 'ConfigUser.cmake'.  'ConfigUser.cmake' is not version controlled
+# (currently listed in .gitignore).
+#
+# Note: CMake considers an empty string, "FALSE", "OFF", "NO", or any string
+# ending in "-NOTFOUND" to be false (this happens to be case-insensitive, so
+# "False", "off", "no", and "something-NotFound" are all false).  Other values
+# are true.  Thus it does not matter whether you use TRUE and FALSE, ON and
+# OFF, or YES and NO for your booleans.
+
+##
+## Section 1: Installation paths
+##
+
+# ============================================================================
+# Basic setup begins here.  All settings are optional.  In most cases, setting
+# CMAKE_INSTALL_PREFIX should be all you need to do in order to build GMT with
+# reasonable defaults enabled.  Note: If you need to specify directory names
+# with spaces (e.g., on Windows) then you must put them in quotes.
+# ============================================================================
+
+# Installation path (usually defaults to /usr/local) [auto]:
+set (CMAKE_INSTALL_PREFIX "$ENV{ISSM_DIR}/externalpackages/gmt/install")
+
+# Set install name suffix used for directories and gmt executables
+# [undefined]:
+#set (GMT_INSTALL_NAME_SUFFIX "suffix")
+
+# Install into traditional directory structure. Disable to install a
+# distribution type directory structure (doc and share separated) [on]:
+#set (GMT_INSTALL_TRADITIONAL_FOLDERNAMES OFF)
+
+# Install convenience links for GMT modules. Disable to install only the main
+# gmt program and access modules as "gmt modulename options" [TRUE]:
+#set (GMT_INSTALL_MODULE_LINKS FALSE)
+
+# Make executables relocatable on supported platforms (relative RPATH) [FALSE]:
+set (GMT_INSTALL_RELOCATABLE TRUE)
+
+# Exclude optional GDAL, PCRE, PCRE2, FFTW3, LAPACK, BLAS, ZLIB dependencies even if you have them installed [FALSE]
+#set (GMT_EXCLUDE_GDAL TRUE)
+#set (GMT_EXCLUDE_PCRE TRUE)
+#set (GMT_EXCLUDE_PCRE2 TRUE)
+#set (GMT_EXCLUDE_FFTW3 TRUE)
+#set (GMT_EXCLUDE_LAPACK TRUE)
+#set (GMT_EXCLUDE_BLAS TRUE)
+#set (GMT_EXCLUDE_ZLIB TRUE)
+
+# ============================================================================
+# Advanced configuration begins here.  Usually it is not necessary to edit any
+# settings below.  You should know what you are doing if you do though.  Note:
+# installation paths are relative to ${CMAKE_INSTALL_PREFIX} unless absolute
+# path is given.
+# ============================================================================
+
+# Set binary installation path [bin]:
+#set (GMT_BINDIR "bin")
+
+# Set library installation path [lib or lib64]:
+#set (GMT_LIBDIR "lib")
+
+# Set include installation path [include/gmt${GMT_INSTALL_NAME_SUFFIX}]:
+#set (GMT_INCLUDEDIR "include/gmt")
+
+# Set share installation path [share or share/gmt${GMT_INSTALL_NAME_SUFFIX}]:
+#set (GMT_DATADIR "share/gmt")
+
+# Set doc installation path [share/doc or
+# share/doc/gmt${GMT_INSTALL_NAME_SUFFIX}]:
+#set (GMT_DOCDIR "share/doc/gmt")
+
+# Set manpage installation path [share/man or
+# share/doc/gmt${GMT_INSTALL_NAME_SUFFIX}/man]:
+#set (GMT_MANDIR "share/doc/gmt/man")
+
+# Install documentation files from this external location instead of creating
+# new HTML documents from scratch [${GMT_SOURCE_DIR}/doc_release]:
+#set (GMT_INSTALL_EXTERNAL_DOC OFF)
+
+# Install manual pages from this external location instead of creating the
+# manpages from scratch [${GMT_SOURCE_DIR}/man_release]:
+#set (GMT_INSTALL_EXTERNAL_MAN OFF)
+
+##
+## Section 2: Build dependencies (should only be needed if CMake cannot
+## automatically detect the right version or path.)
+##
+
+# Set URL to GMT Data server [auto]:
+#set (GMT_DATA_SERVER "data_server_url")
+
+# Set path to GSHHG Shoreline Database [auto]:
+set (GSHHG_ROOT "$ENV{ISSM_DIR}/externalpackages/gshhg/install")
+
+# Copy GSHHG files to ${GMT_DATADIR}/coast [FALSE]:
+#set (COPY_GSHHG TRUE)
+
+# Set path to DCW Digital Chart of the World for GMT [auto]:
+#set (DCW_ROOT "dcw-gmt_path")
+
+# Copy DCW files to ${GMT_DATADIR}/dcw [FALSE]:
+#set (COPY_DCW TRUE)
+
+# Copy GDAL's 'data' directory to ${GMT_DATADIR}/GDAL_DATA [FALSE]:
+#set (GDAL_DATA_PATH C:/programs/compa_libs/gdal_GIT/compileds/VC14_64/data)
+
+# Copy PROJ4's 'share' directory to ${GMT_DATADIR}/GDAL_DATA [FALSE]:
+#set (PROJ_DATA_PATH C:/programs/compa_libs/proj5_GIT/compileds/VC14_64/share/proj)
+
+# FOR WINDOWS ONLY
+# Set path to location of Ghostscript binaries (optional install)
+#set (GHOST_DATA_PATH C:/programs/compa_libs/ghostscript/bin)
+
+# FOR WINDOWS ONLY
+# Set path to location where the gmtmex is located.
+#set (GMTMEX_PATH "C:/progs_cygw/GMTdev/gmtmex/${GMTver}")
+
+# Set location of NetCDF (can be root directory, path to header file or path
+# to nc-config) [auto]:
+set (NETCDF_ROOT "$ENV{ISSM_DIR}/externalpackages/petsc/install")
+
+# Set location of GDAL (can be root directory, path to header file or path to
+# gdal-config) [auto]:
+set (GDAL_ROOT "$ENV{ISSM_DIR}/externalpackages/gdal/install")
+
+# Set location of PCRE (can be root directory, path to header file or path to
+# pcre-config) [auto]:
+#set (PCRE_ROOT "pcre_install_prefix")
+# Alternatively, set location of PCRE2 (can be root directory, path to header file or path to
+# pcre2-config) [auto]:
+#set (PCRE2_ROOT "pcre2_install_prefix")
+
+# Set location of single precision FFTW (can be root directory or path to
+# header file) [auto]:
+#set (FFTW3_ROOT "fftw_install_prefix")
+
+# Set location of ZLIB (can be root directory or path to header file) [auto]:
+set (ZLIB_ROOT "$ENV{ISSM_DIR}/externalpackages/petsc/install")
+
+# Set location of CURL (can be root directory or path to header file) [auto]:
+#set (CURL_ROOT "curl_install_prefix")
+
+# Set location of GLIB component gthread [auto].  This is an optional (and
+# experimental) option which you need to enable:
+#set (GMT_USE_THREADS TRUE)
+# If pkg-config is not installed (e.g. on Windows) you need to specify these:
+#set (GLIB_INCLUDE_DIR c:/path/to/glib-dev/include/glib-2.0)
+#set (GLIB_LIBRARIES c:/path/to/glib-dev/lib/glib-2.0.lib)
+
+# Set LAPACK location. Use this when want to link with LAPACK and it's not found automatically
+set (LAPACK_LIBRARY "-L$ENV{ISSM_DIR}/externalpackages/petsc/install/lib -lflapack")
+set (BLAS_LIBRARY "-L$ENV{ISSM_DIR}/externalpackages/petsc/install/lib -lfblas")
+
+##
+## Section 3: GMT features
+##
+
+# Enforce GPL or LGPL conformity. Use this to disable routines that cannot be
+# redistributed under the terms of the GPL or LGPL such as Shewchuk's
+# triangulation (valid values are GPL, LGPL and off) [off]:
+#set (LICENSE_RESTRICTED GPL)
+
+# Allow building of OpenMP if compiler supports it
+# set (GMT_ENABLE_OPENMP TRUE)
+
+# Configure default units (possible values are SI and US) [SI]:
+#set (UNITS "US")
+
+# Enable building of shared libraries [TRUE] (disable to use static libraries;
+# not recommended; on non-x86 architectures uncomment the next option as well):
+#set (BUILD_SHARED_LIBS FALSE)
+
+# Create position independent code on all targets [auto] (needed for static
+# build on non-x86):
+#set (CMAKE_POSITION_INDEPENDENT_CODE TRUE)
+
+# Build GMT shared lib with supplemental modules [TRUE]:
+#set (BUILD_SUPPLEMENTS FALSE)
+
+# Build/Install GMT Developer include files [TRUE]:
+# This installs the extra include files and configured files needed by 3rd-party
+# developers.  Until we build a separate gmt-devel we include them in the main
+# Distribution.
+#set (BUILD_DEVELOPER FALSE)
+
+##
+## Section 4: Advanced tweaking
+##
+
+#
+# Testing and development
+#
+
+# Enable running examples/tests with "ctest" or "make check" (out-of-source).
+# Need to set either DO_EXAMPLES, DO_TESTS or both and uncomment the following
+# line.
+#enable_testing()
+#set (DO_EXAMPLES TRUE)
+#set (DO_TESTS TRUE)
+#set (DO_ANIMATIONS TRUE)
+# Number of parallel test jobs with "make check":
+#set (N_TEST_JOBS 4)
+
+# Enable this option to run GMT programs from within ${GMT_BINARY_DIR} without
+# installing or setting GMT_SHAREDIR and GMT_USERDIR first. This is required
+# for testing [OFF]:
+#set (SUPPORT_EXEC_IN_BINARY_DIR ON)
+
+# List extra sub-dirs of 'src' with a CMakeList.txt to build non-module codes
+# that link against the full gmt libs (not just the API; for building codes
+# that only need the GMT API, see the gmt-custom project).
+#set (EXTRA_BUILD_DIRS apidemo)
+# Uncomment the following line to enable running low-level C tests of the API
+#set (DO_API_TESTS ON)
+
+# Directory in which to install the release sources per default
+# [${GMT_BINARY_DIR}/gmt-${GMT_PACKAGE_VERSION}]:
+#set (GMT_RELEASE_PREFIX "release-src-prefix")
+
+# If set to false, image conversion from PS images to PNG and PDF does
+# not depend on the gmt binary target. Note: "make gmt" is then required
+# before docs_depends [TRUE].
+#set (GMT_DOCS_DEPEND_ON_GMT FALSE)
+
+#
+# Debugging
+#
+
+# Set build type can be: empty, Debug, Release, RelWithDebInfo or MinSizeRel
+# [Release]:
+#set (CMAKE_BUILD_TYPE Debug)
+
+# Extra debugging for developers:
+#if ( CMAKE_GENERATOR STREQUAL "Xcode" )
+##	So Xcode can find the supplemental plug-ins during debug sessions
+#	add_definitions(-DXCODER)
+#   add_definitions(-DDEBUG_MODERN)			# To set PPID == 0 during Xcode test
+#	message("Add Xcode definition for GMT")
+#endif()
+#add_definitions(-DDEBUG)
+#add_definitions(-DMEMDEBUG) # Turn on memory tracking see gmt_support.c for extra info
+#set (CMAKE_C_FLAGS "-Wall -Wdeclaration-after-statement") # recommended even for release build
+#set (CMAKE_C_FLAGS "-Wextra ${CMAKE_C_FLAGS}")            # extra warnings
+#set (CMAKE_C_FLAGS_DEBUG -ggdb3)                          # gdb debugging symbols
+#set (CMAKE_LINK_DEPENDS_DEBUG_MODE TRUE)                  # debug link dependencies
+if (HAVE_OPENMP)
+	set (CMAKE_C_FLAGS_RELEASE "-ggdb3 -O2 -Wuninitialized -flax-vector-conversions")  # check uninitialized variables
+else (HAVE_OPENMP)
+	set (CMAKE_C_FLAGS_RELEASE "-ggdb3 -O2 -Wuninitialized")  # check uninitialized variables
+endif (HAVE_OPENMP)
+
+#
+# System specific tweaks
+#
+
+# This is for GCC on Solaris to avoid "relocations remain against allocatable
+# but non-writable sections" problems:
+#set (USER_GMTLIB_LINK_FLAGS -mimpure-text)
+
+# This may be needed to enable strdup and extended math functions with GCC and
+# Suncc on Solaris:
+#set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__EXTENSIONS__")
+
+# Do not warn when building with Windows SDK or Visual Studio Express:
+#set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS ON)
+
+# Manually select runtime library when compiling with Windows SDK or Visual
+# Studio Express:
+#set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS c:/Windows/System32/msvcr100.dll)
+
+# If your NetCDF library is static (not recommended, applies to Windows only)
+#set (NETCDF_STATIC TRUE)
+
+# If you want to rename the DLLs to something other than the default (e.g. to
+# append the bitness - Windows only)
+# WARNING: if using this option it is mandatory that the suffix starts with an underscore.
+#if (WIN32)
+# set (BITAGE 32)
+# # Detect if we are building a 32 or 64 bits version
+# if (CMAKE_SIZEOF_VOID_P EQUAL 8)
+#   set (BITAGE 64)
+# endif ()
+# set (GMT_DLL_RENAME gmt_w${BITAGE})
+# set (PSL_DLL_RENAME psl_w${BITAGE})
+#endif(WIN32)
+
+# On Windows Visual C 2012 needs _ALLOW_KEYWORD_MACROS to build
+#if(MSVC11)
+#  add_definitions(/D_ALLOW_KEYWORD_MACROS)
+#endif(MSVC11)
Index: /issm/trunk/externalpackages/gmt/configs/ConfigUser.cmake
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/ConfigUser.cmake	(revision 24685)
+++ /issm/trunk/externalpackages/gmt/configs/ConfigUser.cmake	(revision 24686)
@@ -103,5 +103,5 @@
 
 # Set path to DCW Digital Chart of the World for GMT [auto]:
-#set (DCW_ROOT "dcw-gmt_path")
+#set (DCW_ROOT "$ENV{ISSM_DIR}/externalpackages/dcw/install")
 
 # Copy DCW files to $/dcw [FALSE]:
@@ -141,8 +141,8 @@
 # Enable building of shared libraries [TRUE] (disable to use static libraries;
 # not recommended):
-#set (BUILD_SHARED_LIBS FALSE)
+#set (BUILD_SHARED_LIBS TRUE)
 
 # Build GMT shared lib with supplemental modules [TRUE]:
-#set (BUILD_SUPPLEMENTS FALSE)
+#set (BUILD_SUPPLEMENTS TRUE)
 
 ##
Index: sm/trunk/externalpackages/gmt/configs/ConfigUser.cmake-jenkins
===================================================================
--- /issm/trunk/externalpackages/gmt/configs/ConfigUser.cmake-jenkins	(revision 24685)
+++ 	(revision )
@@ -1,240 +1,0 @@
-#
-# $Id: ConfigUserTemplate.cmake 12904 2014-02-17 20:52:35Z fwobbe $
-#
-# Copyright (c) 1991-2014 by P. Wessel, W. H. F. Smith, R. Scharroo, J. Luis and F. Wobbe
-# See LICENSE.TXT file for copying and redistribution conditions.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation; version 3 or any later version.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
-# for more details.
-#
-# Contact info: gmt.soest.hawaii.edu
-# ----------------------------------------------------------------------------
-
-# Use this file to override variables in 'ConfigDefault.cmake' on a per-user
-# basis.  First copy 'ConfigUserTemplate.cmake' to 'ConfigUser.cmake', then
-# edit 'ConfigUser.cmake'.  'ConfigUser.cmake' is not version controlled
-# (currently listed in svn:ignore property)
-#
-# Note: CMake considers an empty string, "FALSE", "OFF", "NO", or any string
-# ending in "-NOTFOUND" to be false (this happens to be case-insensitive, so
-# "False", "off", "no", and "something-NotFound" are all false).  Other values
-# are true.  Thus it does not matter whether you use TRUE and FALSE, ON and
-# OFF, or YES and NO for your booleans.
-
-##
-## Section 1: Installation paths
-##
-
-# ============================================================================
-# Basic setup begins here.  All settings are optional.  In most cases, setting
-# CMAKE_INSTALL_PREFIX should be all you need to do in order to build GMT with
-# reasonable defaults enabled.
-# ============================================================================
-
-# Installation path (usually defaults to /usr/local) [auto]:
-set (CMAKE_INSTALL_PREFIX "$ENV{ISSM_DIR}/externalpackages/gmt/install")
-
-# Set install name suffix used for directories and gmt executables
-# [undefined]:
-#set (GMT_INSTALL_NAME_SUFFIX "suffix")
-
-# Install into traditional directory structure. Disable to install a
-# distribution type directory structure (doc and share separated) [on]:
-#set (GMT_INSTALL_TRADITIONAL_FOLDERNAMES OFF)
-
-# Install convenience links for GMT modules. Disable to install only the main
-# gmt program and access modules as "gmt modulename options" [TRUE]:
-#set (GMT_INSTALL_MODULE_LINKS FALSE)
-
-# Make executables relocatable on supported platforms (relative RPATH) [TRUE]:
-#set (GMT_INSTALL_RELOCATABLE FALSE)
-
-# ============================================================================
-# Advanced configuration begins here.  Usually it is not necessary to edit any
-# settings below.  You should know what you are doing if you do though.  Note:
-# installation paths are relative to ${CMAKE_INSTALL_PREFIX} unless absolute
-# path is given.
-# ============================================================================
-
-# Set binary installation path [bin]:
-#set (GMT_BINDIR "bin")
-
-# Set library installation path [lib or lib64]:
-#set (GMT_LIBDIR "lib")
-
-# Set include installation path [include/gmt${GMT_INSTALL_NAME_SUFFIX}]:
-#set (GMT_INCLUDEDIR "include/gmt")
-
-# Set share installation path [share or share/gmt${GMT_INSTALL_NAME_SUFFIX}]:
-#set (GMT_DATADIR "share/gmt")
-
-# Set doc installation path [share/doc or
-# share/doc/gmt${GMT_INSTALL_NAME_SUFFIX}]:
-#set (GMT_DOCDIR "share/doc/gmt")
-
-# Set manpage installation path [share/man or
-# share/doc/gmt${GMT_INSTALL_NAME_SUFFIX}/man]:
-#set (GMT_MANDIR "share/doc/gmt/man")
-
-# Install documentation files from this external location instead of creating
-# new PDF and HTML documents from scratch [${GMT_SOURCE_DIR}/doc_release]:
-#set (GMT_INSTALL_EXTERNAL_DOC OFF)
-
-# Install manual pages from this external location instead of creating the
-# manpages from scratch [${GMT_SOURCE_DIR}/man_release]:
-#set (GMT_INSTALL_EXTERNAL_MAN OFF)
-
-##
-## Section 2: Build dependencies (should only be needed if CMake cannot
-## automatically detect the rights version or path.)
-##
-
-# Set path to GSHHG Shoreline Database [auto]:
-set (GSHHG_ROOT "$ENV{ISSM_DIR}/externalpackages/gshhg/install")
-
-# Copy GSHHG files to $/coast [FALSE]:
-#set (COPY_GSHHG TRUE)
-
-# Set path to DCW Digital Chart of the World for GMT [auto]:
-#set (DCW_ROOT "dcw-gmt_path")
-
-# Copy DCW files to $/dcw [FALSE]:
-#set (COPY_DCW TRUE)
-
-# Set location of NetCDF (can be root directory, path to header file or path
-# to nc-config) [auto]:
-set (NETCDF_ROOT "/")
-
-# Set location of GDAL (can be root directory, path to header file or path to
-# gdal-config) [auto]:
-set (GDAL_ROOT "$ENV{ISSM_DIR}/externalpackages/gdal/install")
-
-# Set location of PCRE (can be root directory, path to header file or path to
-# pcre-config) [auto]:
-#set (PCRE_ROOT "pcre_install_prefix")
-
-# Set location of single precision FFTW (can be root directory or path to
-# header file) [auto]:
-#set (FFTW3_ROOT "fftw_install_prefix")
-
-# Set location of ZLIB (can be root directory or path to header file) [auto]:
-#set (ZLIB_ROOT "zlib_install_prefix")
-
-##
-## Section 3: GMT features
-##
-
-# Enforce GPL or LGPL conformity. Use this to disable routines that cannot be
-# redistributed under the terms of the GPL or LGPL such as Shewchuk's
-# triangulation (valid values are GPL, LGPL and off) [off]:
-#set (LICENSE_RESTRICTED GPL)
-
-# Configure default units (possible values are SI and US) [SI]:
-#set (UNITS "US")
-
-# Enable building of shared libraries [TRUE] (disable to use static libraries;
-# not recommended):
-#set (BUILD_SHARED_LIBS FALSE)
-
-# Build GMT shared lib with supplemental modules [TRUE]:
-#set (BUILD_SUPPLEMENTS FALSE)
-
-##
-## Section 4: Advanced tweaking
-##
-
-#
-# Testing and development
-#
-
-# Enable running examples/tests with "ctest" or "make check" (out-of-source).
-# Need to set either DO_EXAMPLES, DO_TESTS or both and uncomment the following
-# line.
-#enable_testing()
-#set (DO_EXAMPLES TRUE)
-#set (DO_TESTS TRUE)
-# Number of parallel test jobs with "make check":
-#set (N_TEST_JOBS 4)
-
-# Enable this option to run GMT programs from within ${GMT_BINARY_DIR} without
-# installing or setting GMT_SHAREDIR and GMT_USERDIR first. This is required
-# for testing [OFF]:
-#set (SUPPORT_EXEC_IN_BINARY_DIR ON)
-
-# List extra sub-dirs of 'src' with a CMakeList.txt to build non-module codes
-# that link against the full gmt libs (not just the API; for building codes
-# that only need the GMT API, see the gmtextension project).
-#set (EXTRA_BUILD_DIRS apidemo)
-
-# Directory in which to install the release sources per default
-# [${GMT_BINARY_DIR}/gmt-${GMT_PACKAGE_VERSION}]:
-#set (GMT_RELEASE_PREFIX "release-src-prefix")
-
-# If set to false, image conversion from PS images to PNG and PDF does
-# not depend on the gmt binary target. Note: "make gmt" is then required
-# before docs_depends [TRUE].
-#set (GMT_DOCS_DEPEND_ON_GMT FALSE)
-
-#
-# Debugging
-#
-
-# Set build type can be: empty, Debug, Release, RelWithDebInfo or MinSizeRel
-# [Release]:
-#set (CMAKE_BUILD_TYPE Debug)
-
-# Extra debugging for developers:
-#add_definitions(-DDEBUG)
-#add_definitions(-DMEMDEBUG) # Turn on memory tracking see gmt_support.c for extra info
-#set (CMAKE_C_FLAGS "-Wall -Wdeclaration-after-statement") # recommended even for release build
-#set (CMAKE_C_FLAGS "-Wextra ${CMAKE_C_FLAGS}")            # extra warnings
-#set (CMAKE_C_FLAGS_DEBUG -ggdb3)                          # gdb debugging symbols
-#set (CMAKE_C_FLAGS_RELEASE "-ggdb3 -O2 -Wuninitialized")  # check uninitialized variables
-#set (CMAKE_LINK_DEPENDS_DEBUG_MODE TRUE)                  # debug link dependencies
-
-#
-# System specific tweaks
-#
-
-# This is for GCC on Solaris to avoid "relocations remain against allocatable
-# but non-writable sections" problems:
-#set (USER_GMTLIB_LINK_FLAGS -mimpure-text)
-
-# This may be needed to enable strdup and extended math functions with GCC and
-# Suncc on Solaris:
-#set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__EXTENSIONS__")
-
-# Do not warn when building with Windows SDK or Visual Studio Express:
-#set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS ON)
-
-# Manually select runtime library when compiling with Windows SDK or Visual
-# Studio Express:
-#set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS c:/Windows/System32/msvcr100.dll)
-
-# If your NetCDF library is static (not recommended, applies to Windows only)
-#set (NETCDF_STATIC TRUE)
-
-# If want to rename the DLLs to something else than the default (e.g. to
-# append the bitness - Windows only)
-#if (WIN32)
-# set (BITAGE 32)
-# # Detect if we are building a 32 or 64 bits version
-# if (CMAKE_SIZEOF_VOID_P EQUAL 8)
-#   set (BITAGE 64)
-# endif ()
-# set (GMT_DLL_RENAME gmt_w${BITAGE})
-# set (PSL_DLL_RENAME psl_w${BITAGE})
-#endif(WIN32)
-
-# On Windows Visual C 2012 needs _ALLOW_KEYWORD_MACROS to build
-#if(MSVC11)
-#  add_definitions(/D_ALLOW_KEYWORD_MACROS)
-#endif(MSVC11)
-
-# vim: textwidth=78 noexpandtab tabstop=2 softtabstop=2 shiftwidth=2
Index: /issm/trunk/externalpackages/gmt/install-5.1-linux.sh
===================================================================
--- /issm/trunk/externalpackages/gmt/install-5.1-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/gmt/install-5.1-linux.sh	(revision 24686)
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="5.1.1"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/gmt-${VER}.tar.gz" "gmt-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf gmt-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv gmt-$VER/* src
+rm -rf gmt-$VER
+
+# Copy source customizations
+cp configs/5.1/linux/cmake/ConfigUser.cmake src/cmake
+
+# Configure
+cd src
+mkdir build
+cd build
+cmake ..
+
+# Install
+if [ $# -eq 0 ]; then
+	make install
+else
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ../..
Index: /issm/trunk/externalpackages/gmt/install-6.0-linux.sh
===================================================================
--- /issm/trunk/externalpackages/gmt/install-6.0-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/gmt/install-6.0-linux.sh	(revision 24686)
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -eu
+
+
+## Environment
+#
+CURL_ROOT="${ISSM_DIR}/externalpackages/curl/install"
+
+## Constants
+#
+VER="6.0.0"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/gmt-${VER}.tar.gz" "gmt-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf gmt-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv gmt-${VER}/* src
+rm -rf gmt-${VER}
+
+# Copy custom configuration files
+cp ./configs/6.0/linux/cmake/ConfigUser.cmake ./src/cmake
+
+# Configure
+cd src
+mkdir build
+cd build
+
+# NOTE: There is a CMake variable named CURL_ROOT in src/cmake/ConfigUser.cmake
+#		that, ostensibly, allows for supplying the path to curl when it is in a
+#		a non-standard location. That said, newer versions of CMake will
+#		ignore said variable and instead try to find curl itself. Passing in
+#		the two options below overrides this behavior.
+cmake \
+	-DCURL_LIBRARY="${CURL_ROOT}/lib" \
+	-DCURL_INCLUDE_DIR="${CURL_ROOT}/include" \
+	..
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/gmt/install.sh
===================================================================
--- /issm/trunk/externalpackages/gmt/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/gmt/install.sh	(revision 24686)
@@ -2,25 +2,33 @@
 set -eu
 
-#Erase install
-rm -rf install  src gmt
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/gmt-5.1.1.tar.gz' 'gmt-5.1.1.tar.gz'
+# Constants
+#
+VER="5.1.1"
 
-#install directory
-mkdir src
-tar -zxvf gmt-5.1.1.tar.gz 
-mv gmt-5.1.1/* src
-rm -rf gmt-5.1.1
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/gmt-${VER}.tar.gz" "gmt-${VER}.tar.gz"
 
-#configure: 
-cp configs/ConfigUser.cmake ./src/cmake
+# Unpack source
+tar -zxvf gmt-$VER.tar.gz
 
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv gmt-$VER/* src
+rm -rf gmt-$VER
+
+# Copy source customizations
+cp configs/ConfigUser.cmake src/cmake
+
+# Configure
 cd src
 mkdir build
 cd build
-cmake ../
+cmake ..
 
-#compile
+# Install
 if [ $# -eq 0 ]; then
 	make install
@@ -29,4 +37,4 @@
 fi
 
-#come back: 
-cd ../../
+# Return to initial directory
+cd ../..
Index: /issm/trunk/externalpackages/gshhg/install.sh
===================================================================
--- /issm/trunk/externalpackages/gshhg/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/gshhg/install.sh	(revision 24686)
@@ -2,15 +2,18 @@
 set -eu
 
-rm -rf gssh-gmt-2.3.4.tar.gz  src install
 
-#get gssh database from noaa's website:  http://www.ngdc.noaa.gov/mgg/shorelines/gshhs.html
-#curl http://www.ngdc.noaa.gov/mgg/shorelines/data/gshhg/latest/gshhg-gmt-2.3.4.tar.gz > gshhg-gmt-2.3.4.tar.gz
+## Constants
+#
+VER="2.3.4"
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/gshhg-gmt-2.3.4.tar.gz' 'gshhg-gmt-2.3.4.tar.gz'
+# Cleanup
+rm -rf install
 
-#untar: 
-tar -zxvf gshhg-gmt-2.3.4.tar.gz 
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/gshhg-gmt-${VER}.tar.gz" "gshhg-gmt-${VER}.tar.gz"
 
-#move: 
-mv gshhg-gmt-2.3.4 install
+# Unpack source
+tar -zxvf gshhg-gmt-${VER}.tar.gz
+
+# Install
+mv gshhg-gmt-${VER} install
Index: /issm/trunk/externalpackages/gsl/install-javascript.sh
===================================================================
--- /issm/trunk/externalpackages/gsl/install-javascript.sh	(revision 24685)
+++ /issm/trunk/externalpackages/gsl/install-javascript.sh	(revision 24686)
@@ -2,20 +2,32 @@
 set -eu
 
-#Source emscripten to ensure emcc/em++ compiler are in env
-source $ISSM_DIR/externalpackages/emscripten/install/emsdk_env.sh
+
+# TODO:
+# - Revisit environment variables (especially EMCC_CFLAGS) once support for
+#	Fortran has been accomplished.
+#
+
+# Environment
 export CC=emcc
 export CXX=em++
+export AR=emar
+export RANLIB=emranlib
+#export EMCC_DEBUG=1 # Uncomment to enable debugging
+export EMCC_CFLAGS="-s ERROR_ON_UNDEFINED_SYMBOLS=0" # Required after v1.38.14 to avoid undefined symbol warnings from our Fortran object files being treated as errors
+
+# Source Emscripten environment
+source $ISSM_DIR/externalpackages/emscripten/install/emsdk_env.sh
 
 # Issue with variadic function signatures.
-export CFLAGS=-DSTDC_HEADERS
+#export CFLAGS=-DSTDC_HEADERS
 
-#Some cleanup
+# Cleanup from previous installation
 rm -rf src-javascript install-javascript gsl-1.15
 mkdir src-javascript install-javascript
 
-#Download from ISSM server
+# Download source
 $ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/gsl-1.15.tar.gz' 'gsl-1.15.tar.gz'
 
-#Untar 
+#Untar
 tar -zxvf  gsl-1.15.tar.gz
 
@@ -27,5 +39,6 @@
 cd src-javascript
 
-./configure --prefix="$ISSM_DIR/externalpackages/gsl/install-javascript" \
+./configure \
+	--prefix="$ISSM_DIR/externalpackages/gsl/install-javascript" \
 	--disable-shared
 
@@ -36,3 +49,3 @@
 	make -j $1
 fi
-make install 
+make install
Index: /issm/trunk/externalpackages/hdf5/install-1.10-parallel-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1.10-parallel-with_tests.sh	(revision 24686)
+++ /issm/trunk/externalpackages/hdf5/install-1.10-parallel-with_tests.sh	(revision 24686)
@@ -0,0 +1,49 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="1.10.5"
+ZLIB_ROOT="${ISSM_DIR}/externalpackages/zlib/install"
+
+## Environment
+#
+export CC=mpicc
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/hdf5-${VER}.tar.gz" "hdf5-${VER}.tar.gz"
+
+# Untar source
+tar -zxvf hdf5-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv hdf5-$VER/* src/
+rm -rf hdf5-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/hdf5/install" \
+	--enable-parallel \
+	--with-zlib=${ZLIB_ROOT} \
+	--enable-hl
+
+# Compile, test, and install
+#
+if [ $# -eq 0 ]; then
+	make
+	make check
+	make install
+else
+	make -j $1
+	make -j $1 check
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/hdf5/install-1.10-parallel.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1.10-parallel.sh	(revision 24686)
+++ /issm/trunk/externalpackages/hdf5/install-1.10-parallel.sh	(revision 24686)
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="1.10.5"
+ZLIB_ROOT="${ISSM_DIR}/externalpackages/zlib/install"
+
+## Environment
+#
+export CC=mpicc
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/hdf5-${VER}.tar.gz" "hdf5-${VER}.tar.gz"
+
+# Untar source
+tar -zxvf hdf5-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv hdf5-$VER/* src/
+rm -rf hdf5-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/hdf5/install" \
+	--enable-parallel \
+	--with-zlib=${ZLIB_ROOT} \
+	--enable-hl
+
+# Compile and install
+#
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/hdf5/install-1.10-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1.10-with_tests.sh	(revision 24686)
+++ /issm/trunk/externalpackages/hdf5/install-1.10-with_tests.sh	(revision 24686)
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="1.10.5"
+ZLIB_ROOT="${ISSM_DIR}/externalpackages/zlib/install"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/hdf5-${VER}.tar.gz" "hdf5-${VER}.tar.gz"
+
+# Untar source
+tar -zxvf hdf5-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv hdf5-$VER/* src/
+rm -rf hdf5-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/hdf5/install" \
+	--with-zlib=${ZLIB_ROOT} \
+	--enable-hl
+
+# Compile, test, and install
+#
+if [ $# -eq 0 ]; then
+	make
+	make check
+	make install
+else
+	make -j $1
+	make -j $1 check
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/hdf5/install-1.10.sh
===================================================================
--- /issm/trunk/externalpackages/hdf5/install-1.10.sh	(revision 24686)
+++ /issm/trunk/externalpackages/hdf5/install-1.10.sh	(revision 24686)
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="1.10.5"
+ZLIB_ROOT="${ISSM_DIR}/externalpackages/zlib/install"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/hdf5-${VER}.tar.gz" "hdf5-${VER}.tar.gz"
+
+# Untar source
+tar -zxvf hdf5-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv hdf5-$VER/* src/
+rm -rf hdf5-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/hdf5/install" \
+	--with-zlib=${ZLIB_ROOT} \
+	--enable-hl
+
+# Compile and install
+#
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/lapack/configs/mac/3.8/CMakeLists.txt
===================================================================
--- /issm/trunk/externalpackages/lapack/configs/mac/3.8/CMakeLists.txt	(revision 24686)
+++ /issm/trunk/externalpackages/lapack/configs/mac/3.8/CMakeLists.txt	(revision 24686)
@@ -0,0 +1,401 @@
+cmake_minimum_required(VERSION 2.8.12)
+
+project(LAPACK Fortran C)
+
+set(LAPACK_MAJOR_VERSION 3)
+set(LAPACK_MINOR_VERSION 8)
+set(LAPACK_PATCH_VERSION 0)
+set(
+  LAPACK_VERSION
+  ${LAPACK_MAJOR_VERSION}.${LAPACK_MINOR_VERSION}.${LAPACK_PATCH_VERSION}
+  )
+
+# Add the CMake directory for custom CMake modules
+set(CMAKE_MODULE_PATH "${LAPACK_SOURCE_DIR}/CMAKE" ${CMAKE_MODULE_PATH})
+
+# Set a default build type if none was specified
+if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
+  message(STATUS "Setting build type to 'Release' as none was specified.")
+  set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE)
+  # Set the possible values of build type for cmake-gui
+  set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo" "Coverage")
+endif()
+
+string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER)
+if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "COVERAGE")
+  message(STATUS "Adding coverage")
+  find_package(codecov)
+endif()
+
+# By default static library
+option(BUILD_SHARED_LIBS "Build shared libraries" OFF)
+
+include(GNUInstallDirs)
+
+# Updated OSX RPATH settings
+# In response to CMake 3.0 generating warnings regarding policy CMP0042,
+# the OSX RPATH settings have been updated per recommendations found
+# in the CMake Wiki:
+#  http://www.cmake.org/Wiki/CMake_RPATH_handling#Mac_OS_X_and_the_RPATH
+set(CMAKE_MACOSX_RPATH OFF)
+set(CMAKE_SKIP_BUILD_RPATH FALSE)
+set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
+list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES ${CMAKE_INSTALL_FULL_LIBDIR} isSystemDir)
+if("${isSystemDir}" STREQUAL "-1")
+  set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_FULL_LIBDIR})
+  set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
+endif()
+
+
+# Configure the warning and code coverage suppression file
+configure_file(
+  "${LAPACK_SOURCE_DIR}/CTestCustom.cmake.in"
+  "${LAPACK_BINARY_DIR}/CTestCustom.cmake"
+  @ONLY
+)
+
+include(PreventInSourceBuilds)
+include(PreventInBuildInstalls)
+
+if(UNIX)
+  if("${CMAKE_Fortran_COMPILER}" MATCHES "ifort")
+    set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fp-model strict")
+  endif()
+  if("${CMAKE_Fortran_COMPILER}" MATCHES "xlf")
+    set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -qnosave -qstrict=none")
+  endif()
+# Delete libmtsk in linking sequence for Sun/Oracle Fortran Compiler.
+# This library is not present in the Sun package SolarisStudio12.3-linux-x86-bin
+  string(REPLACE \;mtsk\; \; CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES "${CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES}")
+endif()
+
+if(CMAKE_Fortran_COMPILER_ID STREQUAL "Compaq")
+  if(WIN32)
+    if(CMAKE_GENERATOR STREQUAL "NMake Makefiles")
+      get_filename_component(CMAKE_Fortran_COMPILER_CMDNAM ${CMAKE_Fortran_COMPILER} NAME_WE)
+      message(STATUS "Using Compaq Fortran compiler with command name ${CMAKE_Fortran_COMPILER_CMDNAM}")
+      set(cmd ${CMAKE_Fortran_COMPILER_CMDNAM})
+      string(TOLOWER "${cmd}" cmdlc)
+      if(cmdlc STREQUAL "df")
+        message(STATUS "Assume the Compaq Visual Fortran Compiler is being used")
+        set(CMAKE_Fortran_USE_RESPONSE_FILE_FOR_OBJECTS 1)
+        set(CMAKE_Fortran_USE_RESPONSE_FILE_FOR_INCLUDES 1)
+        #This is a workaround that is needed to avoid forward-slashes in the
+        #filenames listed in response files from incorrectly being interpreted as
+        #introducing compiler command options
+        if(${BUILD_SHARED_LIBS})
+          message(FATAL_ERROR "Making of shared libraries with CVF has not been tested.")
+        endif()
+        set(str "NMake version 9 or later should be used. NMake version 6.0 which is\n")
+        set(str "${str}   included with the CVF distribution fails to build Lapack because\n")
+        set(str "${str}   the number of source files exceeds the limit for NMake v6.0\n")
+        message(STATUS ${str})
+        set(CMAKE_Fortran_LINK_EXECUTABLE "LINK /out:<TARGET> <LINK_FLAGS> <LINK_LIBRARIES> <OBJECTS>")
+      endif()
+    endif()
+  endif()
+endif()
+
+# Get Python
+message(STATUS "Looking for Python greater than 2.6 - ${PYTHONINTERP_FOUND}")
+find_package(PythonInterp 2.7) # lapack_testing.py uses features from python 2.7 and greater
+if(PYTHONINTERP_FOUND)
+  message(STATUS "Using Python version ${PYTHON_VERSION_STRING}")
+else()
+  message(STATUS "No suitable Python version found, so skipping summary tests.")
+endif()
+# --------------------------------------------------
+
+set(LAPACK_INSTALL_EXPORT_NAME lapack-targets)
+
+macro(lapack_install_library lib)
+  install(TARGETS ${lib}
+    EXPORT ${LAPACK_INSTALL_EXPORT_NAME}
+    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+  )
+endmacro()
+
+set(PKG_CONFIG_DIR ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
+
+# --------------------------------------------------
+# Testing
+option(BUILD_TESTING "Build tests" OFF)
+enable_testing()
+include(CTest)
+enable_testing()
+message(STATUS "Build tests: ${BUILD_TESTING}")
+
+# --------------------------------------------------
+# Organize output files.  On Windows this also keeps .dll files next
+# to the .exe files that need them, making tests easy to run.
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${LAPACK_BINARY_DIR}/bin)
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${LAPACK_BINARY_DIR}/lib)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LAPACK_BINARY_DIR}/lib)
+
+# --------------------------------------------------
+# Check for any necessary platform specific compiler flags
+include(CheckLAPACKCompilerFlags)
+CheckLAPACKCompilerFlags()
+
+# --------------------------------------------------
+# Check second function
+
+include(CheckTimeFunction)
+set(TIME_FUNC NONE ${TIME_FUNC})
+CHECK_TIME_FUNCTION(NONE TIME_FUNC)
+CHECK_TIME_FUNCTION(INT_CPU_TIME TIME_FUNC)
+CHECK_TIME_FUNCTION(EXT_ETIME TIME_FUNC)
+CHECK_TIME_FUNCTION(EXT_ETIME_ TIME_FUNC)
+CHECK_TIME_FUNCTION(INT_ETIME TIME_FUNC)
+message(STATUS "--> Will use second_${TIME_FUNC}.f and dsecnd_${TIME_FUNC}.f as timing function.")
+
+set(SECOND_SRC ${LAPACK_SOURCE_DIR}/INSTALL/second_${TIME_FUNC}.f)
+set(DSECOND_SRC ${LAPACK_SOURCE_DIR}/INSTALL/dsecnd_${TIME_FUNC}.f)
+
+# deprecated LAPACK and LAPACKE routines
+option(BUILD_DEPRECATED "Build deprecated routines" OFF)
+message(STATUS "Build deprecated routines: ${BUILD_DEPRECATED}")
+
+# --------------------------------------------------
+# Precision to build
+# By default all precisions are generated
+option(BUILD_SINGLE "Build single precision real" ON)
+option(BUILD_DOUBLE "Build double precision real" ON)
+option(BUILD_COMPLEX "Build single precision complex" ON)
+option(BUILD_COMPLEX16 "Build double precision complex" ON)
+message(STATUS "Build single precision real: ${BUILD_SINGLE}")
+message(STATUS "Build double precision real: ${BUILD_DOUBLE}")
+message(STATUS "Build single precision complex: ${BUILD_COMPLEX}")
+message(STATUS "Build double precision complex: ${BUILD_COMPLEX16}")
+
+if(NOT (BUILD_SINGLE OR BUILD_DOUBLE OR BUILD_COMPLEX OR BUILD_COMPLEX16))
+  message(FATAL_ERROR "Nothing to build, no precision selected.
+  Please enable at least one of these:
+  BUILD_SINGLE, BUILD_DOUBLE, BUILD_COMPLEX, BUILD_COMPLEX16.")
+endif()
+
+# --------------------------------------------------
+# Subdirectories that need to be processed
+option(USE_OPTIMIZED_BLAS "Whether or not to use an optimized BLAS library instead of included netlib BLAS" OFF)
+
+# Check the usage of the user provided BLAS libraries
+if(BLAS_LIBRARIES)
+  include(CheckFortranFunctionExists)
+  set(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
+  CHECK_FORTRAN_FUNCTION_EXISTS("dgemm" BLAS_FOUND)
+  unset(CMAKE_REQUIRED_LIBRARIES)
+  if(BLAS_FOUND)
+    message(STATUS "--> BLAS supplied by user is WORKING, will use ${BLAS_LIBRARIES}.")
+  else()
+    message(ERROR "--> BLAS supplied by user is not WORKING, CANNOT USE ${BLAS_LIBRARIES}.")
+    message(ERROR "-->     Will use REFERENCE BLAS (by default)")
+    message(ERROR "-->     Or Correct your BLAS_LIBRARIES entry ")
+    message(ERROR "-->     Or Consider checking USE_OPTIMIZED_BLAS")
+  endif()
+
+# User did not provide a BLAS Library but specified to search for one
+elseif(USE_OPTIMIZED_BLAS)
+  find_package(BLAS)
+endif()
+
+# Neither user-specified nor optimized BLAS libraries can be used
+if(NOT BLAS_FOUND)
+  message(STATUS "Using supplied NETLIB BLAS implementation")
+  add_subdirectory(BLAS)
+  set(BLAS_LIBRARIES blas)
+else()
+  set(CMAKE_EXE_LINKER_FLAGS
+    "${CMAKE_EXE_LINKER_FLAGS} ${BLAS_LINKER_FLAGS}"
+    CACHE STRING "Linker flags for executables" FORCE)
+  set(CMAKE_MODULE_LINKER_FLAGS
+    "${CMAKE_MODULE_LINKER_FLAGS} ${BLAS_LINKER_FLAGS}"
+    CACHE STRING "Linker flags for modules" FORCE)
+  set(CMAKE_SHARED_LINKER_FLAGS
+    "${CMAKE_SHARED_LINKER_FLAGS} ${BLAS_LINKER_FLAGS}"
+    CACHE STRING "Linker flags for shared libs" FORCE)
+endif()
+
+
+# --------------------------------------------------
+# CBLAS
+option(CBLAS "Build CBLAS" OFF)
+
+if(CBLAS)
+  add_subdirectory(CBLAS)
+endif()
+
+# --------------------------------------------------
+# XBLAS
+
+option(USE_XBLAS "Build extended precision (needs XBLAS)" OFF)
+if(USE_XBLAS)
+  find_library(XBLAS_LIBRARY NAMES xblas)
+endif()
+
+option(USE_OPTIMIZED_LAPACK "Whether or not to use an optimized LAPACK library instead of included netlib LAPACK" OFF)
+
+# --------------------------------------------------
+# LAPACK
+# User did not provide a LAPACK Library but specified to search for one
+if(USE_OPTIMIZED_LAPACK)
+  find_package(LAPACK)
+endif()
+
+# Check the usage of the user provided or automatically found LAPACK libraries
+if(LAPACK_LIBRARIES)
+  include(CheckFortranFunctionExists)
+  set(CMAKE_REQUIRED_LIBRARIES ${LAPACK_LIBRARIES})
+  # Check if new routine of 3.4.0 is in LAPACK_LIBRARIES
+  CHECK_FORTRAN_FUNCTION_EXISTS("dgeqrt" LATESTLAPACK_FOUND)
+  unset(CMAKE_REQUIRED_LIBRARIES)
+  if(LATESTLAPACK_FOUND)
+    message(STATUS "--> LAPACK supplied by user is WORKING, will use ${LAPACK_LIBRARIES}.")
+  else()
+    message(ERROR "--> LAPACK supplied by user is not WORKING or is older than LAPACK 3.4.0, CANNOT USE ${LAPACK_LIBRARIES}.")
+    message(ERROR "-->     Will use REFERENCE LAPACK (by default)")
+    message(ERROR "-->     Or Correct your LAPACK_LIBRARIES entry ")
+    message(ERROR "-->     Or Consider checking USE_OPTIMIZED_LAPACK")
+  endif()
+endif()
+
+# Neither user-specified nor optimized LAPACK libraries can be used
+if(NOT LATESTLAPACK_FOUND)
+  message(STATUS "Using supplied NETLIB LAPACK implementation")
+  set(LAPACK_LIBRARIES lapack)
+  add_subdirectory(SRC)
+else()
+  set(CMAKE_EXE_LINKER_FLAGS
+    "${CMAKE_EXE_LINKER_FLAGS} ${LAPACK_LINKER_FLAGS}"
+    CACHE STRING "Linker flags for executables" FORCE)
+  set(CMAKE_MODULE_LINKER_FLAGS
+    "${CMAKE_MODULE_LINKER_FLAGS} ${LAPACK_LINKER_FLAGS}"
+    CACHE STRING "Linker flags for modules" FORCE)
+  set(CMAKE_SHARED_LINKER_FLAGS
+    "${CMAKE_SHARED_LINKER_FLAGS} ${LAPACK_LINKER_FLAGS}"
+    CACHE STRING "Linker flags for shared libs" FORCE)
+endif()
+
+if(BUILD_TESTING)
+  add_subdirectory(TESTING)
+endif()
+
+# --------------------------------------------------
+# LAPACKE
+option(LAPACKE "Build LAPACKE" OFF)
+
+# LAPACKE has also the interface to some routines from tmglib,
+# if LAPACKE_WITH_TMG is selected, we need to add those routines to LAPACKE
+option(LAPACKE_WITH_TMG "Build LAPACKE with tmglib routines" OFF)
+if(LAPACKE_WITH_TMG)
+  set(LAPACKE ON)
+endif()
+if(BUILD_TESTING OR LAPACKE_WITH_TMG) #already included, avoid double inclusion
+  add_subdirectory(TESTING/MATGEN)
+endif()
+
+if(LAPACKE)
+  add_subdirectory(LAPACKE)
+endif()
+
+# --------------------------------------------------
+# CPACK Packaging
+
+set(CPACK_PACKAGE_NAME "LAPACK")
+set(CPACK_PACKAGE_VENDOR "University of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd")
+set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "LAPACK- Linear Algebra Package")
+set(CPACK_PACKAGE_VERSION_MAJOR 3)
+set(CPACK_PACKAGE_VERSION_MINOR 5)
+set(CPACK_PACKAGE_VERSION_PATCH 0)
+set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
+set(CPACK_PACKAGE_INSTALL_DIRECTORY "LAPACK")
+if(WIN32 AND NOT UNIX)
+  # There is a bug in NSI that does not handle full unix paths properly. Make
+  # sure there is at least one set of four (4) backslashes.
+  set(CPACK_NSIS_HELP_LINK "http:\\\\\\\\http://icl.cs.utk.edu/lapack-forum")
+  set(CPACK_NSIS_URL_INFO_ABOUT "http:\\\\\\\\www.netlib.org/lapack")
+  set(CPACK_NSIS_CONTACT "lapack@eecs.utk.edu")
+  set(CPACK_NSIS_MODIFY_PATH ON)
+  set(CPACK_NSIS_DISPLAY_NAME "LAPACK-${LAPACK_VERSION}")
+  set(CPACK_PACKAGE_RELOCATABLE "true")
+else()
+  set(CPACK_GENERATOR "TGZ")
+  set(CPACK_SOURCE_GENERATOR TGZ)
+  set(CPACK_SOURCE_PACKAGE_FILE_NAME "lapack-${LAPACK_VERSION}")
+  set(CPACK_SOURCE_IGNORE_FILES ~$ .svn ${CPACK_SOURCE_IGNORE_FILES})
+endif()
+include(CPack)
+
+
+# --------------------------------------------------
+
+if(NOT BLAS_FOUND)
+  set(ALL_TARGETS ${ALL_TARGETS} blas)
+endif()
+
+if(NOT LATESTLAPACK_FOUND)
+  set(ALL_TARGETS ${ALL_TARGETS} lapack)
+endif()
+
+if(BUILD_TESTING OR LAPACKE_WITH_TMG)
+  set(ALL_TARGETS ${ALL_TARGETS} tmglib)
+endif()
+
+# Export lapack targets, not including lapacke, from the
+# install tree, if any.
+set(_lapack_config_install_guard_target "")
+if(ALL_TARGETS)
+  install(EXPORT lapack-targets
+    DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/lapack-${LAPACK_VERSION})
+
+  # Choose one of the lapack targets to use as a guard for
+  # lapack-config.cmake to load targets from the install tree.
+  list(GET ALL_TARGETS 0 _lapack_config_install_guard_target)
+endif()
+
+# Include cblas in targets exported from the build tree.
+if(CBLAS)
+  set(ALL_TARGETS ${ALL_TARGETS} cblas)
+endif()
+
+# Include lapacke in targets exported from the build tree.
+if(LAPACKE)
+  set(ALL_TARGETS ${ALL_TARGETS} lapacke)
+endif()
+
+# Export lapack and lapacke targets from the build tree, if any.
+set(_lapack_config_build_guard_target "")
+if(ALL_TARGETS)
+  export(TARGETS ${ALL_TARGETS} FILE lapack-targets.cmake)
+
+  # Choose one of the lapack or lapacke targets to use as a guard
+  # for lapack-config.cmake to load targets from the build tree.
+  list(GET ALL_TARGETS 0 _lapack_config_build_guard_target)
+endif()
+
+configure_file(${LAPACK_SOURCE_DIR}/CMAKE/lapack-config-build.cmake.in
+  ${LAPACK_BINARY_DIR}/lapack-config.cmake @ONLY)
+
+
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/lapack.pc.in ${CMAKE_CURRENT_BINARY_DIR}/lapack.pc @ONLY)
+  install(FILES
+  ${CMAKE_CURRENT_BINARY_DIR}/lapack.pc
+  DESTINATION ${PKG_CONFIG_DIR}
+  )
+
+configure_file(${LAPACK_SOURCE_DIR}/CMAKE/lapack-config-install.cmake.in
+  ${LAPACK_BINARY_DIR}/CMakeFiles/lapack-config.cmake @ONLY)
+
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+  ${LAPACK_BINARY_DIR}/lapack-config-version.cmake
+  VERSION ${LAPACK_VERSION}
+  COMPATIBILITY SameMajorVersion
+  )
+
+install(FILES
+  ${LAPACK_BINARY_DIR}/CMakeFiles/lapack-config.cmake
+  ${LAPACK_BINARY_DIR}/lapack-config-version.cmake
+  DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/lapack-${LAPACK_VERSION}
+  )
Index: /issm/trunk/externalpackages/lapack/install-3.8-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/lapack/install-3.8-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/lapack/install-3.8-linux-static.sh	(revision 24686)
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -eu
+
+
+# NOTE: This installation script will build both BLAS and LAPACK libraries
+#
+
+## Constants
+#
+VER="3.8.0"
+
+## Environment
+#
+export CC="${ISSM_DIR}/externalpackages/mpich/install/bin/mpicc"
+export CXX="${ISSM_DIR}/externalpackages/mpich/install/bin/mpicxx"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/lapack-${VER}.tar.gz" "lapack-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf lapack-${VER}.tar.gz
+
+# Cleanup
+rm -rf build install src
+mkdir build install install/lib src
+
+# Move source to 'src' directory
+mv lapack-${VER}/* src
+rm -rf lapack-${VER}
+
+# Configure
+#
+cd build
+cmake \
+	../src
+
+# Compile
+make
+
+# Install
+cd ..
+cp ./build/lib/* ./install/lib
Index: /issm/trunk/externalpackages/lapack/install-3.8-linux.sh
===================================================================
--- /issm/trunk/externalpackages/lapack/install-3.8-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/lapack/install-3.8-linux.sh	(revision 24686)
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -eu
+
+
+# NOTE: This installation script will build both BLAS and LAPACK libraries
+#
+
+## Constants
+#
+VER="3.8.0"
+
+## Environment
+#
+export CC="${ISSM_DIR}/externalpackages/mpich/install/bin/mpicc"
+export CXX="${ISSM_DIR}/externalpackages/mpich/install/bin/mpicxx"
+export FC="${ISSM_DIR}/externalpackages/mpich/install/bin/mpif77"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/lapack-${VER}.tar.gz" "lapack-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf lapack-${VER}.tar.gz
+
+# Cleanup
+rm -rf build install src
+mkdir build install install/lib src
+
+# Move source to 'src' directory
+mv lapack-${VER}/* src
+rm -rf lapack-${VER}
+
+# Configure
+#
+cd build
+cmake \
+	-DBUILD_SHARED_LIBS=ON \
+	../src
+
+# Compile
+make
+
+# Install
+cd ..
+cp ./build/lib/* ./install/lib
Index: /issm/trunk/externalpackages/lapack/install-3.8-mac.sh
===================================================================
--- /issm/trunk/externalpackages/lapack/install-3.8-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/lapack/install-3.8-mac.sh	(revision 24686)
@@ -0,0 +1,41 @@
+#!/bin/bash
+set -eu
+
+
+# NOTE: This installation script will build both BLAS and LAPACK libraries
+#
+
+## Constants
+#
+VER="3.8.0"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/lapack-${VER}.tar.gz" "lapack-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf lapack-$VER.tar.gz
+
+# Cleanup
+rm -rf build install src
+mkdir build install install/lib src
+
+# Move source to 'src' directory
+mv lapack-$VER/* src
+rm -rf lapack-$VER
+
+# Copy customized configuration files to 'src' directory
+cp configs/mac/3.8/CMakeLists.txt src/CMakeLists.txt
+
+# Configure
+#
+cd build
+cmake \
+	-DBUILD_SHARED_LIBS=ON \
+	../src
+
+# Compile
+make
+
+# Install
+cd ..
+cp ./build/lib/* ./install/lib
Index: /issm/trunk/externalpackages/mpich/install-3.2-linux64.sh
===================================================================
--- /issm/trunk/externalpackages/mpich/install-3.2-linux64.sh	(revision 24685)
+++ /issm/trunk/externalpackages/mpich/install-3.2-linux64.sh	(revision 24686)
@@ -2,29 +2,38 @@
 set -eu
 
-#Some cleanup
-rm -rf src install mpich-3.2
+
+## Constants
+#
+VER="3.2"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/mpich-${VER}.tar.gz" "mpich-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf  mpich-$VER.tar.gz
+
+# Cleanup
+rm -rf src install
 mkdir src install
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/mpich-3.2.tar.gz' 'mpich-3.2.tar.gz'
+# Move source into 'src' directory
+mv mpich-$VER/* src
+rm -rf mpich-$VER
 
-#Untar
-tar -zxvf  mpich-3.2.tar.gz
-
-#Move mpich into src directory
-mv mpich-3.2/* src
-rm -rf mpich-3.2
-
-#Configure mpich
+# Configure
 cd src
 ./configure \
-	--prefix="$ISSM_DIR/externalpackages/mpich/install" \
+	--prefix="${ISSM_DIR}/externalpackages/mpich/install" \
 	--enable-shared
 
-#Compile mpich (this new version supports parallel make)
+# Compile and install
 if [ $# -eq 0 ]; then
 	make
+	make install
 else
 	make -j $1
+	make -j $1 install
 fi
-make install
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/mpich/install-3.2-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/mpich/install-3.2-mac-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/mpich/install-3.2-mac-static.sh	(revision 24686)
@@ -0,0 +1,37 @@
+#!/bin/bash
+set -eu
+
+#Some cleanup
+rm -rf src install mpich-3.2
+mkdir src install
+
+#Download from ISSM server
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'http://issm.jpl.nasa.gov/files/externalpackages/mpich-3.2.tar.gz' 'mpich-3.2.tar.gz'
+
+#Untar
+tar -zxvf  mpich-3.2.tar.gz
+
+#Move mpich into src directory
+mv mpich-3.2/* src
+rm -rf mpich-3.2
+
+#patch from http://lists.mpich.org/pipermail/discuss/2016-May/004764.html
+cat src/src/include/mpiimpl.h | sed -e 's/} MPID_Request ATTRIBUTE((__aligned__(32)));/} ATTRIBUTE((__aligned__(32))) MPID_Request;/g' > TEMP
+mv TEMP src/src/include/mpiimpl.h
+
+#Configure mpich
+cd src
+./configure \
+	--prefix="$ISSM_DIR/externalpackages/mpich/install" \
+	--disable-shared \
+	--enable-strict=all \
+	--enable-fast \
+	--with-pic
+
+#Compile mpich (this new version supports parallel make)
+if [ $# -eq 0 ]; then
+	make
+else
+	make -j $1
+fi
+make install
Index: /issm/trunk/externalpackages/mpich/install-3.2-mac.sh
===================================================================
--- /issm/trunk/externalpackages/mpich/install-3.2-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/mpich/install-3.2-mac.sh	(revision 24686)
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -eu
+
+#Some cleanup
+rm -rf src install mpich-3.2
+mkdir src install
+
+#Download from ISSM server
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/mpich-3.2.tar.gz' 'mpich-3.2.tar.gz'
+
+#Untar 
+tar -zxvf  mpich-3.2.tar.gz
+
+#Move mpich into src directory
+mv mpich-3.2/* src
+rm -rf mpich-3.2
+
+#patch from http://lists.mpich.org/pipermail/discuss/2016-May/004764.html
+cat src/src/include/mpiimpl.h | sed -e 's/} MPID_Request ATTRIBUTE((__aligned__(32)));/} ATTRIBUTE((__aligned__(32))) MPID_Request;/g' > TEMP
+mv TEMP src/src/include/mpiimpl.h
+
+#Configure mpich
+cd src
+./configure \
+	--prefix="$ISSM_DIR/externalpackages/mpich/install" \
+	--enable-shared
+
+#Compile mpich (this new version supports parallel make)
+if [ $# -eq 0 ]; then
+	make
+else
+	make -j $1
+fi
+make install 
Index: sm/trunk/externalpackages/mpich/install-3.2-macosx64-static.sh
===================================================================
--- /issm/trunk/externalpackages/mpich/install-3.2-macosx64-static.sh	(revision 24685)
+++ 	(revision )
@@ -1,37 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf src install mpich-3.2
-mkdir src install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'http://issm.jpl.nasa.gov/files/externalpackages/mpich-3.2.tar.gz' 'mpich-3.2.tar.gz'
-
-#Untar
-tar -zxvf  mpich-3.2.tar.gz
-
-#Move mpich into src directory
-mv mpich-3.2/* src
-rm -rf mpich-3.2
-
-#patch from http://lists.mpich.org/pipermail/discuss/2016-May/004764.html
-cat src/src/include/mpiimpl.h | sed -e 's/} MPID_Request ATTRIBUTE((__aligned__(32)));/} ATTRIBUTE((__aligned__(32))) MPID_Request;/g' > TEMP
-mv TEMP src/src/include/mpiimpl.h
-
-#Configure mpich
-cd src
-./configure \
-	--prefix="$ISSM_DIR/externalpackages/mpich/install" \
-	--disable-shared \
-	--enable-strict=all \
-	--enable-fast \
-	--with-pic
-
-#Compile mpich (this new version supports parallel make)
-if [ $# -eq 0 ]; then
-	make
-else
-	make -j $1
-fi
-make install
Index: sm/trunk/externalpackages/mpich/install-3.2-macosx64.sh
===================================================================
--- /issm/trunk/externalpackages/mpich/install-3.2-macosx64.sh	(revision 24685)
+++ 	(revision )
@@ -1,34 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf src install mpich-3.2
-mkdir src install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/mpich-3.2.tar.gz' 'mpich-3.2.tar.gz'
-
-#Untar 
-tar -zxvf  mpich-3.2.tar.gz
-
-#Move mpich into src directory
-mv mpich-3.2/* src
-rm -rf mpich-3.2
-
-#patch from http://lists.mpich.org/pipermail/discuss/2016-May/004764.html
-cat src/src/include/mpiimpl.h | sed -e 's/} MPID_Request ATTRIBUTE((__aligned__(32)));/} ATTRIBUTE((__aligned__(32))) MPID_Request;/g' > TEMP
-mv TEMP src/src/include/mpiimpl.h
-
-#Configure mpich
-cd src
-./configure \
-	--prefix="$ISSM_DIR/externalpackages/mpich/install" \
-	--enable-shared
-
-#Compile mpich (this new version supports parallel make)
-if [ $# -eq 0 ]; then
-	make
-else
-	make -j $1
-fi
-make install 
Index: /issm/trunk/externalpackages/mpich/install-3.2-static.sh
===================================================================
--- /issm/trunk/externalpackages/mpich/install-3.2-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/mpich/install-3.2-static.sh	(revision 24686)
@@ -0,0 +1,58 @@
+#!/bin/bash
+set -eu
+
+
+## Environment
+#
+export LD_LIBRARY_PATH="" # Ensure that libtool does not hardcode local paths set by running $ISSM_DIR/etc/environment.sh into binaries
+export LD_RUN_PATH="" # Ensure that libtool does not hardcode local paths set by running $ISSM_DIR/etc/environment.sh into binaries
+
+## Constants
+#
+VER="3.2"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/mpich-${VER}.tar.gz" "mpich-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf mpich-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv mpich-${VER}/* src
+rm -rf mpich-${VER}
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/mpich/install" \
+	--disable-shared \
+	--disable-dependency-tracking \
+	--enable-fast=all \
+	--with-pic
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
+
+# Strip RPATH/RUNPATH entries from executables
+#
+# NOTE:
+# - We are doing this so that we do not ship executables that have hardcoded
+#	local paths in their RPATH/RUNPATH entries
+# - This might otherwise be accomplished with extensive changes to libtool's
+#	handling of rpath
+#
+chrpath -d ./install/bin/hydra_pmi_proxy
+chrpath -d ./install/bin/mpiexec
Index: /issm/trunk/externalpackages/mpich/install-3.3-static.sh
===================================================================
--- /issm/trunk/externalpackages/mpich/install-3.3-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/mpich/install-3.3-static.sh	(revision 24686)
@@ -0,0 +1,58 @@
+#!/bin/bash
+set -eu
+
+
+## Environment
+#
+export LD_LIBRARY_PATH="" # Ensure that libtool does not hardcode local paths set by running $ISSM_DIR/etc/environment.sh into binaries
+export LD_RUN_PATH="" # Ensure that libtool does not hardcode local paths set by running $ISSM_DIR/etc/environment.sh into binaries
+
+## Constants
+#
+VER="3.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/mpich-${VER}.tar.gz" "mpich-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf mpich-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv mpich-${VER}/* src
+rm -rf mpich-${VER}
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/mpich/install" \
+	--disable-shared \
+	--disable-dependency-tracking \
+	--enable-fast=all \
+	--with-pic
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
+
+# Strip RPATH/RUNPATH entries from executables
+#
+# NOTE:
+# - We are doing this so that we do not ship executables that have hardcoded
+#	local paths in their RPATH/RUNPATH entries
+# - This might otherwise be accomplished with extensive changes to libtool's
+#	handling of rpath
+#
+chrpath -d ./install/bin/hydra_pmi_proxy
+chrpath -d ./install/bin/mpiexec
Index: /issm/trunk/externalpackages/mpich/install-3.3.sh
===================================================================
--- /issm/trunk/externalpackages/mpich/install-3.3.sh	(revision 24686)
+++ /issm/trunk/externalpackages/mpich/install-3.3.sh	(revision 24686)
@@ -0,0 +1,38 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/mpich-${VER}.tar.gz" "mpich-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf mpich-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source into 'src' directory
+mv mpich-${VER}/* src
+rm -rf mpich-${VER}
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/mpich/install" \
+	--disable-static \
+	--disable-dependency-tracking \
+	--enable-fast=all
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/mumps/configs/5.0/linux/Makefile.debian.static.PAR
===================================================================
--- /issm/trunk/externalpackages/mumps/configs/5.0/linux/Makefile.debian.static.PAR	(revision 24686)
+++ /issm/trunk/externalpackages/mumps/configs/5.0/linux/Makefile.debian.static.PAR	(revision 24686)
@@ -0,0 +1,72 @@
+#
+#  This file is part of MUMPS 5.0.2, released
+#  on Fri Jul 15 09:12:54 UTC 2016
+#
+# These settings for a PC under Debian/linux with standard packages :
+# metis (parmetis), scotch (ptscotch), openmpi, gfortran
+
+# packages installation :
+# apt-get install libmetis-dev libparmetis-dev libscotch-dev libptscotch-dev libatlas-base-dev openmpi-bin libopenmpi-dev
+
+# Begin orderings
+LSCOTCHDIR = ${ISSM_DIR}/externalpackages/petsc/install/lib
+ISCOTCH   = -I${ISSM_DIR}/externalpackages/petsc/install/include # only needed for ptscotch
+
+LSCOTCH   = -L$(LSCOTCHDIR) -lptesmumps -lptscotch -lptscotcherr -lscotch
+#LSCOTCH   = -L$(LSCOTCHDIR) -lesmumps -lscotch -lscotcherr
+
+LPORDDIR = $(topdir)/PORD/lib/
+IPORD    = -I$(topdir)/PORD/include/
+LPORD    = -L$(LPORDDIR) -lpord
+
+LMETISDIR = ${ISSM_DIR}/externalpackages/petsc/install/lib
+IMETIS    = -I${ISSM_DIR}/externalpackages/petsc/install/include
+#IMETIS    = -I/usr/include/metis
+
+LMETIS    = -L$(LMETISDIR) -lparmetis -lmetis
+#LMETIS    = -L$(LMETISDIR) -lmetis
+
+# Corresponding variables reused later
+#ORDERINGSF = -Dmetis -Dpord -Dparmetis -Dscotch -Dptscotch
+ORDERINGSF = -Dmetis -Dpord -Dscotch
+ORDERINGSC  = $(ORDERINGSF)
+LORDERINGS = $(LMETIS) $(LPORD) $(LSCOTCH)
+IORDERINGSF = $(ISCOTCH)
+IORDERINGSC = $(IMETIS) $(IPORD) $(ISCOTCH)
+# End orderings
+################################################################################
+
+PLAT    =
+LIBEXT  = .a
+OUTC    = -o
+OUTF    = -o
+RM = /bin/rm -f
+CC = ${ISSM_DIR}/externalpackages/mpich/install/bin/mpicc
+FC = ${ISSM_DIR}/externalpackages/mpich/install/bin/mpifort
+FL = ${ISSM_DIR}/externalpackages/mpich/install/bin/mpifort
+AR = ar vr
+RANLIB = ranlib
+SCALAP  = -L${ISSM_DIR}/externalpackages/scalapack/install/lib -lscalapack
+
+INCPAR = -I${ISSM_DIR}/externalpackages/mpich/install/include
+
+LIBPAR = $(SCALAP) -L${ISSM_DIR}/externalpackages/mpich/install/lib -lmpi -lmpifort
+
+INCSEQ = -I$(topdir)/libseq
+LIBSEQ  =  -L$(topdir)/libseq -lmpiseq
+
+LIBBLAS = -L${ISSM_DIR}/externalpackages/openblas/install/lib -lopenblas
+LIBOTHERS = -lpthread
+
+#Preprocessor defs for calling Fortran from C (-DAdd_ or -DAdd__ or -DUPPER)
+CDEFS   = -DAdd_
+
+#Begin Optimized options
+# uncomment -fopenmp in lines below to benefit from OpenMP
+OPTF    = -O  -DALLOW_NON_INIT # -fopenmp
+OPTL    = -O # -fopenmp
+OPTC    = -O # -fopenmp
+#End Optimized options
+INCS = $(INCPAR)
+LIBS = $(LIBPAR)
+LIBSEQNEEDED =
Index: /issm/trunk/externalpackages/mumps/install-5.0-linux-parallel-static.sh
===================================================================
--- /issm/trunk/externalpackages/mumps/install-5.0-linux-parallel-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/mumps/install-5.0-linux-parallel-static.sh	(revision 24686)
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -eu
+
+
+## NOTE: This install script uses make directly rather than CMake and then make
+
+## Constants
+#
+PKG="mumps"
+VER="5.0.2-p2"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/${PKG}-${VER}.tar.gz" "${PKG}-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf ${PKG}-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install install/include install/lib src
+
+# Move source to 'src' directory
+mv MUMPS_${VER}/* src
+rm -rf MUMPS_${VER}
+
+# Copy customized source and config files to 'src' directory
+cp configs/5.0/linux/Makefile.debian.static.PAR src/Makefile.inc
+
+# Compile
+cd src
+if [ $# -eq 0 ]; then
+	make
+else
+	make -j $1
+fi
+
+# Install
+cd ..
+cp src/include/* install/include
+cp src/lib/lib*.* install/lib
Index: /issm/trunk/externalpackages/netcdf/install-4.7-parallel-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7-parallel-with_tests.sh	(revision 24686)
+++ /issm/trunk/externalpackages/netcdf/install-4.7-parallel-with_tests.sh	(revision 24686)
@@ -0,0 +1,60 @@
+#!/bin/bash
+set -eu
+
+
+# Dependencies
+# - MPI implementation (for parallel I/O support)
+# - hdf5 (1.8.9 / 1.10.1 or later, for netCDF-4 support)
+# - zlib (1.2.5 or later, for netCDF-4 compression)
+# - curl (7.18.0 or later, for DAP remote access client support)
+#
+# For most ISSM installations, only hdf5 will be necessary
+#
+
+# Constants
+#
+VER="4.7.2"
+HDF5_ROOT="${ISSM_DIR}/externalpackages/hdf5/install"
+ZLIB_ROOT="${ISSM_DIR}/externalpackages/zlib/install"
+
+# Environment
+#
+export CC=mpicc
+CURL_ROOT="${ISSM_DIR}/externalpackages/curl/install"
+export CPPFLAGS="-I${HDF5_ROOT}/include -I${ZLIB_ROOT}/include -I${CURL_ROOT}/include"
+export LDFLAGS="-L${HDF5_ROOT}/lib -L${ZLIB_ROOT}/lib -L${CURL_ROOT}/lib"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/netcdf-c-${VER}.tar.gz" "netcdf-c-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf netcdf-c-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv netcdf-c-$VER/* src/
+rm -rf netcdf-c-$VER
+
+# Configure
+cd src
+./configure \
+ 	--prefix="${ISSM_DIR}/externalpackages/netcdf/install" \
+ 	--enable-parallel-tests \
+ 	--disable-doxygen
+
+# Compile, test, and install
+if [ $# -eq 0 ]; then
+	make
+	make check
+	make install
+else
+	make -j $1
+	make -j $1 check
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/netcdf/install-4.7-parallel.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7-parallel.sh	(revision 24686)
+++ /issm/trunk/externalpackages/netcdf/install-4.7-parallel.sh	(revision 24686)
@@ -0,0 +1,57 @@
+#!/bin/bash
+set -eu
+
+
+# Dependencies
+# - MPI implementation (for parallel I/O support)
+# - hdf5 (1.8.9 / 1.10.1 or later, for netCDF-4 support)
+# - zlib (1.2.5 or later, for netCDF-4 compression)
+# - curl (7.18.0 or later, for DAP remote access client support)
+#
+# For most ISSM installations, only hdf5 will be necessary
+#
+
+# Constants
+#
+VER="4.7.2"
+CURL_ROOT="${ISSM_DIR}/externalpackages/curl/install"
+HDF5_ROOT="${ISSM_DIR}/externalpackages/hdf5/install"
+ZLIB_ROOT="${ISSM_DIR}/externalpackages/zlib/install"
+
+# Environment
+#
+export CC=mpicc
+export CPPFLAGS="-I${HDF5_ROOT}/include -I${ZLIB_ROOT}/include -I${CURL_ROOT}/include"
+export LDFLAGS="-L${HDF5_ROOT}/lib -L${ZLIB_ROOT}/lib -L${CURL_ROOT}/lib"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/netcdf-c-${VER}.tar.gz" "netcdf-c-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf netcdf-c-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv netcdf-c-$VER/* src/
+rm -rf netcdf-c-$VER
+
+# Configure
+cd src
+./configure \
+ 	--prefix="${ISSM_DIR}/externalpackages/netcdf/install" \
+ 	--disable-doxygen
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/netcdf/install-4.7-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7-with_tests.sh	(revision 24686)
+++ /issm/trunk/externalpackages/netcdf/install-4.7-with_tests.sh	(revision 24686)
@@ -0,0 +1,57 @@
+#!/bin/bash
+set -eu
+
+
+# Dependencies
+# - hdf5 (1.8.9 / 1.10.1 or later, for netCDF-4 support)
+# - zlib (1.2.5 or later, for netCDF-4 compression)
+# - curl (7.18.0 or later, for DAP remote access client support)
+#
+# For most ISSM installations, only hdf5 will be necessary
+#
+
+# Constants
+#
+VER="4.7.2"
+CURL_ROOT="${ISSM_DIR}/externalpackages/curl/install"
+HDF5_ROOT="${ISSM_DIR}/externalpackages/hdf5/install"
+ZLIB_ROOT="${ISSM_DIR}/externalpackages/zlib/install"
+
+# Environment
+#
+export CPPFLAGS="-I${HDF5_ROOT}/include -I${ZLIB_ROOT}/include -I${CURL_ROOT}/include"
+export LDFLAGS="-L${HDF5_ROOT}/lib -L${ZLIB_ROOT}/lib -L${CURL_ROOT}/lib"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/netcdf-c-${VER}.tar.gz" "netcdf-c-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf netcdf-c-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv netcdf-c-$VER/* src/
+rm -rf netcdf-c-$VER
+
+# Configure
+cd src
+./configure \
+ 	--prefix="${ISSM_DIR}/externalpackages/netcdf/install" \
+ 	--disable-doxygen
+
+# Compile, test, and install
+if [ $# -eq 0 ]; then
+	make
+	make check
+	make install
+else
+	make -j $1
+	make -j $1 check
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/netcdf/install-4.7.sh
===================================================================
--- /issm/trunk/externalpackages/netcdf/install-4.7.sh	(revision 24686)
+++ /issm/trunk/externalpackages/netcdf/install-4.7.sh	(revision 24686)
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -eu
+
+
+# Dependencies
+# - hdf5 (1.8.9 / 1.10.1 or later, for netCDF-4 support)
+# - zlib (1.2.5 or later, for netCDF-4 compression)
+# - curl (7.18.0 or later, for DAP remote access client support)
+#
+# For most ISSM installations, only hdf5 will be necessary
+#
+
+# Constants
+#
+VER="4.7.2"
+CURL_ROOT="${ISSM_DIR}/externalpackages/curl/install"
+HDF5_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
+ZLIB_ROOT="${ISSM_DIR}/externalpackages/petsc/install"
+
+# Environment
+#
+export CPPFLAGS="-I${HDF5_ROOT}/include -I${ZLIB_ROOT}/include -I${CURL_ROOT}/include"
+export LDFLAGS="-L${HDF5_ROOT}/lib -L${ZLIB_ROOT}/lib -L${CURL_ROOT}/lib"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/netcdf-c-${VER}.tar.gz" "netcdf-c-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf netcdf-c-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv netcdf-c-${VER}/* src/
+rm -rf netcdf-c-${VER}
+
+# Configure
+cd src
+./configure \
+ 	--prefix="${ISSM_DIR}/externalpackages/netcdf/install" \
+ 	--disable-doxygen
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/openblas/install-0.3-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/openblas/install-0.3-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/openblas/install-0.3-linux-static.sh	(revision 24686)
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -eu
+
+
+# NOTE: Full LAPACK implementation is included in OpenBLAS
+#
+
+## Constants
+#
+PKG="OpenBLAS"
+VER="0.3.7"
+
+## Environment
+#
+export CC="${ISSM_DIR}/externalpackages/mpich/install/bin/mpicc"
+export CXX="${ISSM_DIR}/externalpackages/mpich/install/bin/mpicxx"
+export FC="${ISSM_DIR}/externalpackages/mpich/install/bin/mpifort"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/${PKG}-${VER}.tar.gz" "${PKG}-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf ${PKG}-${VER}.tar.gz
+
+# Cleanup
+rm -rf build install src
+mkdir build install install/lib src
+
+# Move source to 'src' directory
+mv ${PKG}-${VER}/* src
+rm -rf ${PKG}-${VER}
+
+# Configure
+#
+cd build
+cmake \
+	../src
+
+# Compile
+make
+
+# Install
+cd ..
+cp ./build/lib/* ./install/lib
Index: /issm/trunk/externalpackages/petsc/install-3.11-linux-solid_earth.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.11-linux-solid_earth.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.11-linux-solid_earth.sh	(revision 24686)
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.11.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1 \
+	--download-hdf5=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.11-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.11-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.11-linux-static.sh	(revision 24686)
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.11.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE: Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXFLAGS, and FCFLAGS
+#
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.11-linux.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.11-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.11-linux.sh	(revision 24686)
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.11.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.11-linux64.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.11-linux64.sh	(revision 24685)
+++ 	(revision )
@@ -1,36 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf install petsc-3.11.0 src
-mkdir install src
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'http://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-lite-3.11.0.tar.gz' 'petsc-3.11.0.tar.gz'
-
-#Untar and move petsc to install directory
-tar -zxvf  petsc-3.11.0.tar.gz
-mv petsc-3.11.0/* src/
-rm -rf petsc-3.11.0
-
-#configure
-cd src
-./config/configure.py \
-	--prefix="$ISSM_DIR/externalpackages/petsc/install" \
-	--with-mpi-dir="$ISSM_DIR/externalpackages/mpich/install" \
-	--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
-	--with-debugging=1 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-shared-libraries=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-mumps=1 \
-	--download-scalapack=1 \
-	--download-fblaslapack=1 \
-	--with-pic=1
-
-#Compile and intall
-make
-make install
Index: /issm/trunk/externalpackages/petsc/install-3.11-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.11-mac-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.11-mac-static.sh	(revision 24686)
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.11.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE: Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXFLAGS, and FCFLAGS
+#
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.11-mac.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.11-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.11-mac.sh	(revision 24686)
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.11.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.12-linux-solid_earth.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-linux-solid_earth.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.12-linux-solid_earth.sh	(revision 24686)
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.12.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--download-zlib=1 \
+	--download-hdf5=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.12-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.12-linux-static.sh	(revision 24686)
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.12.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE: Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXFLAGS, and FCFLAGS
+#
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.12-linux.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.12-linux.sh	(revision 24686)
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.12.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.12-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-mac-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.12-mac-static.sh	(revision 24686)
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.12.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE: Cannot use --with-fpic option when compiling static libs,
+#
+#		Cannot determine compiler PIC flags if shared libraries is turned off
+#		Either run using --with-shared-libraries or --with-pic=0 and supply the
+#		compiler PIC flag via CFLAGS, CXXFLAGS, and FCFLAGS
+#
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-shared-libraries=0 \
+	--CFLAGS="-fPIC" \
+	--CXXFLAGS="-fPIC" \
+	--FFLAGS="-fPIC -static-libgfortran" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.12-mac.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.12-mac.sh	(revision 24686)
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.12.3"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-fblaslapack=1 \
+	--download-mpich=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.12-pleiades.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.12-pleiades.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.12-pleiades.sh	(revision 24686)
@@ -0,0 +1,63 @@
+#!/bin/bash
+set -eu
+
+#Some cleanup
+rm -rf install petsc-3.12.3 src
+mkdir install src
+
+#Download from ISSM server
+#$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/petsc-lite-3.12.3.tar.gz' 'petsc-3.12.3.tar.gz'
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'http://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-lite-3.12.3.tar.gz' 'petsc-3.12.3.tar.gz'
+
+#Untar and move petsc to install directory
+tar -zxvf  petsc-3.12.3.tar.gz
+mv petsc-3.12.3/* src/
+rm -rf petsc-3.12.3
+
+#configure (based on /nasa/petsc/3.7.5/intel_mpt/lib/petsc/conf/petscvariables look for CONFIGURE_OPTIONS)
+cd src
+./config/configure.py \
+	--prefix="$ISSM_DIR/externalpackages/petsc/install" \
+	--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
+	--with-cc=icc --with-cxx=icpc --with-fc=ifort --with-f77=ifort --with-gnu-compilers=0 --with-cpp=/usr/bin/cpp \
+	--with-vendor-compilers=intel \
+	-COPTFLAGS=-g -O3 -axCORE-AVX2,AVX -xSSE4.2 -CXXOPTFLAGS=-g -O3 -axCORE-AVX2,AVX -xSSE4.2 -FOPTFLAGS=-g -O3 -axCORE-AVX2,AVX -xSSE4.2 \
+	--with-blas-lapack-dir=/nasa/intel/Compiler/2016.2.181/mkl/lib/intel64 \
+	--with-scalapack-include=/nasa/intel/Compiler/2016.2.181/mkl/include \
+	--with-scalapack-lib="/nasa/intel/Compiler/2016.2.181/mkl/lib/intel64/libmkl_scalapack_lp64.so /nasa/intel/Compiler/2016.2.181/mkl/lib/intel64/libmkl_blacs_sgimpt_lp64.so" \
+	--with-shared-libraries=1 \
+	--known-mpi-shared-libraries=1 \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-batch=1  \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-mumps=1 
+
+#  --download-superlu_dist=yes \
+#	--download-hypre=yes
+
+#prepare script to reconfigure petsc
+cat > script.queue << EOF
+#PBS -S /bin/bash
+#PBS -l select=1:ncpus=1:model=bro
+#PBS -l walltime=200 
+#PBS -W group_list=s1690
+#PBS -m e 
+
+. /usr/share/modules/init/bash 
+module load pkgsrc
+module load comp-intel/2016.2.181
+module load mpi-sgi/mpt
+
+export PATH="$PATH:." 
+export MPI_GROUP_MAX=64 
+mpiexec -np 1 ./conftest-arch-linux2-c-opt
+EOF
+
+#print instructions
+echo "== Now: cd src/ "
+echo "== qsub -q devel script.queue "
+echo "== Then run reconfigure script generated by PETSc and follow instructions"
Index: /issm/trunk/externalpackages/petsc/install-3.6-simba-intel-mvapich2-2.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.6-simba-intel-mvapich2-2.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.6-simba-intel-mvapich2-2.sh	(revision 24686)
@@ -0,0 +1,69 @@
+#!/bin/bash
+set -eu
+
+#Some cleanup
+rm -rf install petsc-3.6.3 src
+mkdir install src
+
+#Download from ISSM server
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/petsc-lite-3.6.3.tar.gz' 'petsc-3.6.3.tar.gz'
+
+#Untar and move petsc to install directory
+tar -zxvf  petsc-3.6.3.tar.gz
+mv petsc-3.6.3/* src/
+rm -rf petsc-3.6.3
+
+# configure
+cd src
+./configure --prefix="$ISSM_DIR/externalpackages/petsc/install-$VER-intel-mvapich2-2" \
+	--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src-$VER-intel-mvapich2-2"\
+	--with-mpi-dir="/usr/local/mpi/intel18/mvapich2-2.2/" \
+	--known-mpi-shared-libraries \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-shared-libraries=1 \
+	--with-batch=1 \
+	--download-mumps=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-fblaslapack=1 \
+	--download-scalapack=1 
+
+#prepare script to reconfigure petsc
+cd $ISSM_DIR/externalpackages/petsc/src
+
+PBS_O_WORKDIR=$PWD
+
+cat > script.pbs << EOF
+#PBS -S /bin/bash
+#PBS -q batch
+#PBS -l nodes=1:ppn=1
+
+# Set qsub location
+PBS_O_WORKDIR=$PWD
+cd $PBS_O_WORKDIR
+
+# set mpi module
+module load intel18/compiler-18
+module load intel18/mvapich2-2.2
+module rm   intel18/openmpi-2.0.2
+module rm   intel18/impi-18
+
+echo "check module"
+module list
+
+mpirun -machinefile $PBS_O_WORKDIR/mpi.host -np 1 $PBS_O_WORKDIR/conftest-arch-linux2-c-opt  
+EOF
+
+# generate mpi.host file
+cat > mpi.host <<EOF
+simba02:36
+EOF
+
+#print instructions
+echo "== Now: cd src/ "
+echo "== qsub script.pbs "
+echo "== Then run reconfigure script generated by PETSc and follow instructions"
+echo "== If it works good, do ./reconfigure-arch-linux2-c-opt.py"
Index: /issm/trunk/externalpackages/petsc/install-3.7-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.7-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.7-linux-static.sh	(revision 24686)
@@ -0,0 +1,58 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.7.6"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf  petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+#
+# NOTE:
+# - Despite the fact that the with-pic option is used, external packages
+#	require the fPIC compiler flag to be supplied directly to the compilers in
+#	order for position independent code to be generated.
+#
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-shared-libraries=0 \
+	--with-mpi-dir="${ISSM_DIR}/externalpackages/mpich/install" \
+	--with-blas-lib="-L${ISSM_DIR}/externalpackages/lapack/install/lib -lblas" \
+	--with-lapack-lib="-L${ISSM_DIR}/externalpackages/lapack/install/lib -llapack" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--COPTFLAGS="-fPIC" \
+	--CXXOPTFLAGS="-fPIC" \
+	--FOPTFLAGS="-fPIC"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/petsc/install-3.7-linux.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.7-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.7-linux.sh	(revision 24686)
@@ -0,0 +1,54 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.7.6"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-$VER/* src/
+rm -rf petsc-$VER
+
+# Configure
+#
+# NOTE:
+# - Despite the fact that the with-pic option is used, external packages
+#	require the fPIC compiler flag to be supplied directly to the compilers in
+#	order for position independent code to be generated.
+#
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-mpi-dir="${ISSM_DIR}/externalpackages/mpich/install" \
+	--with-blas-lib="-L${ISSM_DIR}/externalpackages/lapack/install/lib -lblas" \
+	--with-lapack-lib="-L${ISSM_DIR}/externalpackages/lapack/install/lib -llapack" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-pic=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/petsc/install-3.7-linux64.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.7-linux64.sh	(revision 24685)
+++ /issm/trunk/externalpackages/petsc/install-3.7-linux64.sh	(revision 24686)
@@ -2,22 +2,31 @@
 set -eu
 
-#Some cleanup
-rm -rf install petsc-3.7.6 src
+
+## Constants
+#
+VER="3.7.6"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf  petsc-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
 mkdir install src
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/petsc-lite-3.7.6.tar.gz' 'petsc-3.7.6.tar.gz'
+# Move source to 'src' directory
+mv petsc-$VER/* src/
+rm -rf petsc-$VER
 
-#Untar and move petsc to install directory
-tar -zxvf  petsc-3.7.6.tar.gz
-mv petsc-3.7.6/* src/
-rm -rf petsc-3.7.6
-
-#configure
+# Configure
 cd src
 ./config/configure.py \
-	--prefix="$ISSM_DIR/externalpackages/petsc/install" \
-	--with-mpi-dir="$ISSM_DIR/externalpackages/mpich/install" \
-	--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-mpi-dir="${ISSM_DIR}/externalpackages/mpich/install" \
+	--with-blas-lib="-L${ISSM_DIR}/externalpackages/lapack/install/lib -lblas" \
+	--with-lapack-lib="-L${ISSM_DIR}/externalpackages/lapack/install/lib -llapack" \
 	--with-debugging=0 \
 	--with-valgrind=0 \
@@ -27,10 +36,16 @@
 	--download-metis=1 \
 	--download-parmetis=1 \
-	--download-mumps=1 \
 	--download-scalapack=1 \
-	--download-fblaslapack=1 \
-	--with-pic=1
+	--download-mumps=1
 
-#Compile and intall
-make
-make install
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/externalpackages/petsc/install-3.7-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.7-mac-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.7-mac-static.sh	(revision 24686)
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -eu
+
+#Some cleanup
+rm -rf install petsc-3.7.6 src
+mkdir install src
+
+#Download from ISSM server
+$ISSM_DIR/scripts/DownloadExternalPackage.sh 'http://issm.jpl.nasa.gov/files/externalpackages/petsc-lite-3.7.6.tar.gz' 'petsc-3.7.6.tar.gz'
+
+#Untar and move petsc to install directory
+tar -zxvf  petsc-3.7.6.tar.gz
+mv petsc-3.7.6/* src/
+rm -rf petsc-3.7.6
+
+#configure
+cd src
+./config/configure.py \
+	--prefix="$ISSM_DIR/externalpackages/petsc/install" \
+	--with-single-library=1 \
+	--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
+	--with-shared-libraries=0 \
+	--with-debugging=0 \
+	--with-mpi-dir="$ISSM_DIR/externalpackages/mpich/install" \
+	--download-fblaslapack=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--with-x=0 \
+	--download-scalapack=1 \
+	--download-mumps=1 \
+	--with-ssl=0 \
+	--with-valgrind=0 \
+	--with-pic=1 \
+	--COPTFLAGS="-fPIC" \
+	--CXXOPTFLAGS="-fPIC" \
+	--FOPTFLAGS="-fPIC"
+
+#Compile and install
+make
+make install
Index: /issm/trunk/externalpackages/petsc/install-3.7-mac.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.7-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/petsc/install-3.7-mac.sh	(revision 24686)
@@ -0,0 +1,46 @@
+#!/bin/bash
+set -eu
+
+
+## Constants
+#
+VER="3.7.6"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/petsc-lite-${VER}.tar.gz" "petsc-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf petsc-${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv petsc-${VER}/* src/
+rm -rf petsc-${VER}
+
+# Configure
+cd src
+./config/configure.py \
+	--prefix="${ISSM_DIR}/externalpackages/petsc/install" \
+	--PETSC_DIR="${ISSM_DIR}/externalpackages/petsc/src" \
+	--with-mpi-dir="${ISSM_DIR}/externalpackages/mpich/install" \
+	--with-debugging=0 \
+	--with-valgrind=0 \
+	--with-x=0 \
+	--with-ssl=0 \
+	--with-shared-libraries=1 \
+	--download-metis=1 \
+	--download-parmetis=1 \
+	--download-scalapack=1 \
+	--download-mumps=1
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: sm/trunk/externalpackages/petsc/install-3.7-macosx64-static.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.7-macosx64-static.sh	(revision 24685)
+++ 	(revision )
@@ -1,40 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf install petsc-3.7.6 src
-mkdir install src
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'http://issm.jpl.nasa.gov/files/externalpackages/petsc-lite-3.7.6.tar.gz' 'petsc-3.7.6.tar.gz'
-
-#Untar and move petsc to install directory
-tar -zxvf  petsc-3.7.6.tar.gz
-mv petsc-3.7.6/* src/
-rm -rf petsc-3.7.6
-
-#configure
-cd src
-./config/configure.py \
-	--prefix="$ISSM_DIR/externalpackages/petsc/install" \
-	--with-single-library=1 \
-	--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
-	--with-shared-libraries=0 \
-	--with-debugging=0 \
-	--with-mpi-dir="$ISSM_DIR/externalpackages/mpich/install" \
-	--download-fblaslapack=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--with-x=0 \
-	--download-scalapack=1 \
-	--download-mumps=1 \
-	--with-ssl=0 \
-	--with-valgrind=0 \
-	--with-pic=1 \
-	--COPTFLAGS="-fPIC" \
-	--CXXOPTFLAGS="-fPIC" \
-	--FOPTFLAGS="-fPIC"
-
-#Compile and intall
-make
-make install
Index: sm/trunk/externalpackages/petsc/install-3.7-macosx64.sh
===================================================================
--- /issm/trunk/externalpackages/petsc/install-3.7-macosx64.sh	(revision 24685)
+++ 	(revision )
@@ -1,34 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup
-rm -rf install petsc-3.7.6 src
-mkdir install src
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/petsc-lite-3.7.6.tar.gz' 'petsc-3.7.6.tar.gz'
-
-#Untar and move petsc to install directory
-tar -zxvf  petsc-3.7.6.tar.gz
-mv petsc-3.7.6/* src/
-rm -rf petsc-3.7.6
-
-#configure
-cd src
-./config/configure.py \
-	--prefix="$ISSM_DIR/externalpackages/petsc/install" \
-	--with-mpi-dir="$ISSM_DIR/externalpackages/mpich/install" \
-	--PETSC_DIR="$ISSM_DIR/externalpackages/petsc/src" \
-	--with-debugging=1 \
-	--with-valgrind=0 \
-	--with-x=0 \
-	--with-ssl=0 \
-	--with-shared-libraries=1 \
-	--download-metis=1 \
-	--download-parmetis=1 \
-	--download-mumps=1 \
-	--download-scalapack=1
-
-#Compile and intall
-make
-make install
Index: /issm/trunk/externalpackages/proj/install-6.2-with_tests.sh
===================================================================
--- /issm/trunk/externalpackages/proj/install-6.2-with_tests.sh	(revision 24686)
+++ /issm/trunk/externalpackages/proj/install-6.2-with_tests.sh	(revision 24686)
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="6.2.1"
+
+## Environment
+#
+
+# NOTE: On macOS, SQLite3 should be installed by default, but PROJ currently
+#		requires,
+#
+#			SQLITE3_LIBS="-lsqlite3".
+#
+#		On Ubuntu Linux, install the SQLite3 binary, libraries and headers
+#		with,
+#
+#			`apt-get install sqlite3 libsqlite3-dev`
+#
+export SQLITE3_LIBS="-lsqlite3"
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "http://issm.jpl.nasa.gov/files/externalpackages/proj-${VER}.tar.gz" "proj-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf proj-$VER.tar.gz
+
+# Move source into 'src' directory
+mv proj-$VER/* src
+rm -rf proj-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/proj/install"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make check
+	make install
+else
+	make -j $1
+	make -j $1 check
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/proj/install-6.2.sh
===================================================================
--- /issm/trunk/externalpackages/proj/install-6.2.sh	(revision 24686)
+++ /issm/trunk/externalpackages/proj/install-6.2.sh	(revision 24686)
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="6.2.1"
+
+## Environment
+#
+
+# NOTE: On macOS, SQLite3 should be installed by default, but PROJ currently
+#		requires,
+#
+#			SQLITE3_LIBS="-lsqlite3".
+#
+#		On Ubuntu Linux, install the SQLite3 binary, libraries and headers
+#		with,
+#
+#			`apt-get install sqlite3 libsqlite3-dev`
+#
+export SQLITE3_LIBS="-lsqlite3"
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "http://issm.jpl.nasa.gov/files/externalpackages/proj-${VER}.tar.gz" "proj-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf proj-$VER.tar.gz
+
+# Move source into 'src' directory
+mv proj-$VER/* src
+rm -rf proj-$VER
+
+# Configure
+cd src
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/proj/install"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
Index: /issm/trunk/externalpackages/scalapack/configs/2.0/linux/SLmake.inc.static
===================================================================
--- /issm/trunk/externalpackages/scalapack/configs/2.0/linux/SLmake.inc.static	(revision 24686)
+++ /issm/trunk/externalpackages/scalapack/configs/2.0/linux/SLmake.inc.static	(revision 24686)
@@ -0,0 +1,60 @@
+############################################################################
+#
+#  Program:         ScaLAPACK
+#
+#  Module:          SLmake.inc
+#
+#  Purpose:         Top-level Definitions
+#
+#  Creation date:   February 15, 2000
+#
+#  Modified:        October 13, 2011
+#
+#  Send bug reports, comments or suggestions to scalapack@cs.utk.edu
+#
+############################################################################
+#
+#  C preprocessor definitions:  set CDEFS to one of the following:
+#
+#     -DNoChange (fortran subprogram names are lower case without any suffix)
+#     -DUpCase   (fortran subprogram names are upper case without any suffix)
+#     -DAdd_     (fortran subprogram names are lower case with "_" appended)
+
+CDEFS         = -DAdd_
+
+#
+#  The fortran and C compilers, loaders, and their flags
+#
+
+FC            = ${ISSM_DIR}/externalpackages/mpich/install/bin/mpifort
+CC            = ${ISSM_DIR}/externalpackages/mpich/install/bin/mpicc
+NOOPT         = -O0 -fPIC
+FCFLAGS       = -O3 -fPIC
+CCFLAGS       = -O3 -fPIC
+FCLOADER      = $(FC)
+CCLOADER      = $(CC)
+FCLOADFLAGS   = $(FCFLAGS)
+CCLOADFLAGS   = $(CCFLAGS)
+
+#
+#  The archiver and the flag(s) to use when building archive (library)
+#  Also the ranlib routine.  If your system has no ranlib, set RANLIB = echo
+#
+
+ARCH          = ar
+ARCHFLAGS     = cr
+RANLIB        = ranlib
+
+#
+#  The name of the ScaLAPACK library to be created
+#
+
+SCALAPACKLIB  = libscalapack.a
+
+#
+#  BLAS, LAPACK (and possibly other) libraries needed for linking test programs
+#
+
+BLASLIB       = -L${ISSM_DIR}/externalpackages/openblas/install/lib -lopenblas
+LAPACKLIB     = -L${ISSM_DIR}/externalpackages/openblas/install/lib -lopenblas
+LIBS          = $(LAPACKLIB) $(BLASLIB)
Index: /issm/trunk/externalpackages/scalapack/install-2.0-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/scalapack/install-2.0-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/scalapack/install-2.0-linux-static.sh	(revision 24686)
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -eu
+
+
+## NOTE: This install script uses make directly rather than CMake and then make
+
+## Constants
+#
+PKG="scalapack"
+VER="2.0.2"
+
+## Environment
+#
+export MPI_BASE_DIR="${ISSM_DIR}/externalpackages/mpich/install"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/${PKG}-${VER}.tgz" "${PKG}-${VER}.tgz"
+
+# Unpack source
+tar -zxvf ${PKG}-${VER}.tgz
+
+# Cleanup
+rm -rf build install src
+mkdir build install install/lib src
+
+# Move source to 'src' directory
+mv ${PKG}-${VER}/* src
+rm -rf ${PKG}-${VER}
+
+# Copy customized source and config files to 'src' directory
+cp configs/2.0/linux/SLmake.inc.static src/SLmake.inc
+
+# Compile
+cd src
+if [ $# -eq 0 ]; then
+	make
+else
+	make -j $1
+fi
+
+# Install
+cd ..
+cp ./src/lib*.* ./install/lib
Index: /issm/trunk/externalpackages/scotch/configs/6.0/linux/Makefile.inc.linux-parallel-static
===================================================================
--- /issm/trunk/externalpackages/scotch/configs/6.0/linux/Makefile.inc.linux-parallel-static	(revision 24686)
+++ /issm/trunk/externalpackages/scotch/configs/6.0/linux/Makefile.inc.linux-parallel-static	(revision 24686)
@@ -0,0 +1,21 @@
+EXE		=
+LIB		= .a
+OBJ		= .o
+
+MAKE		= make
+AR		= ar
+ARFLAGS		= -ruv
+CAT		= cat
+CCS		= gcc
+CCP		= ${ISSM_DIR}/externalpackages/mpich/install/bin/mpicc
+CCD		= gcc
+CFLAGS		= -O3 -fPIC -DCOMMON_FILE_COMPRESS_GZ -DCOMMON_PTHREAD -DCOMMON_RANDOM_FIXED_SEED -DSCOTCH_RENAME -DSCOTCH_PTHREAD -Drestrict=__restrict -DIDXSIZE64
+CLIBFLAGS	=
+LDFLAGS		= -lz -lm -lrt -pthread
+CP		= cp
+LEX		= flex -Pscotchyy -olex.yy.c
+LN		= ln
+MKDIR		= mkdir -p
+MV		= mv
+RANLIB		= ranlib
+YACC		= bison -pscotchyy -y -b y
Index: /issm/trunk/externalpackages/scotch/install-6.0-linux-parallel-static.sh
===================================================================
--- /issm/trunk/externalpackages/scotch/install-6.0-linux-parallel-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/scotch/install-6.0-linux-parallel-static.sh	(revision 24686)
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -eu
+
+
+## NOTE: This install script uses make directly rather than CMake and then make
+
+## Constants
+#
+PKG="scotch"
+VER="6.0.9"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/${PKG}_${VER}.tar.gz" "${PKG}_${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf ${PKG}_${VER}.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install install/include install/lib src
+
+# Move source to 'src' directory
+mv ${PKG}_${VER}/* src
+rm -rf ${PKG}_${VER}
+
+# Copy customized source and config files to 'src' directory
+cp configs/6.0/linux/Makefile.inc.linux-parallel-static src/src/Makefile.inc
+
+# Compile
+cd src/src
+if [ $# -eq 0 ]; then
+	make ptscotch
+	make ptesmumps
+else
+	make -j $1 ptscotch
+	make -j $1 ptesmumps
+fi
+
+# Install
+cd ../..
+cp src/include/* install/include
+cp src/lib/lib*.* install/lib
Index: /issm/trunk/externalpackages/sqlite/install.sh
===================================================================
--- /issm/trunk/externalpackages/sqlite/install.sh	(revision 24685)
+++ /issm/trunk/externalpackages/sqlite/install.sh	(revision 24686)
@@ -2,29 +2,32 @@
 set -eu
 
-#Some cleanup
-rm -rf src
-rm -rf install
-rm -rf sqlite-autoconf-3071300
-mkdir src install
 
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/sqlite-autoconf-3071300.tar.gz' 'sqlite-autoconf-3071300.tar.gz'
+VER="3300100"
 
-#Untar 
-tar -zxvf  sqlite-autoconf-3071300.tar.gz
+# Cleanup
+rm -rf install src
+mkdir install src
 
-#Move sqlite-autoconf into src directory
-mv sqlite-autoconf-3071300/* src
-rm -rf sqlite-autoconf-3071300
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "http://issm.jpl.nasa.gov/files/externalpackages/sqlite-autoconf-${VER}.tar.gz" "sqlite-autoconf-${VER}.tar.gz"
 
-#Configure sqlite-autoconf
+# Unpack source
+tar -zxvf sqlite-autoconf-$VER.tar.gz
+
+# Move source into 'src' directory
+mv sqlite-autoconf-$VER/* src
+rm -rf sqlite-autoconf-$VER
+
+# Configure
 cd src
-./configure  --prefix="$ISSM_DIR/externalpackages/sqlite/install" 
+./configure \
+	--prefix="${ISSM_DIR}/externalpackages/sqlite/install"
 
-#Compile and install sqlite-autoconf
+# Compile and install
 if [ $# -eq 0 ]; then
 	make
+	make install
 else
 	make -j $1
+	make -j $1 install
 fi
-make install
Index: /issm/trunk/externalpackages/triangle/configs/javascript/triangle.h
===================================================================
--- /issm/trunk/externalpackages/triangle/configs/javascript/triangle.h	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/configs/javascript/triangle.h	(revision 24686)
@@ -0,0 +1,296 @@
+/*****************************************************************************/
+/*                                                                           */
+/*  (triangle.h)                                                             */
+/*                                                                           */
+/*  Include file for programs that call Triangle.                            */
+/*                                                                           */
+/*  Accompanies Triangle Version 1.6                                         */
+/*  July 28, 2005                                                            */
+/*                                                                           */
+/*  Copyright 1996, 2005                                                     */
+/*  Jonathan Richard Shewchuk                                                */
+/*  2360 Woolsey #H                                                          */
+/*  Berkeley, California  94705-1927                                         */
+/*  jrs@cs.berkeley.edu                                                      */
+/*                                                                           */
+/*****************************************************************************/
+
+/*****************************************************************************/
+/*                                                                           */
+/*  How to call Triangle from another program                                */
+/*                                                                           */
+/*                                                                           */
+/*  If you haven't read Triangle's instructions (run "triangle -h" to read   */
+/*  them), you won't understand what follows.                                */
+/*                                                                           */
+/*  Triangle must be compiled into an object file (triangle.o) with the      */
+/*  TRILIBRARY symbol defined (generally by using the -DTRILIBRARY compiler  */
+/*  switch).  The makefile included with Triangle will do this for you if    */
+/*  you run "make trilibrary".  The resulting object file can be called via  */
+/*  the procedure triangulate().                                             */
+/*                                                                           */
+/*  If the size of the object file is important to you, you may wish to      */
+/*  generate a reduced version of triangle.o.  The REDUCED symbol gets rid   */
+/*  of all features that are primarily of research interest.  Specifically,  */
+/*  the -DREDUCED switch eliminates Triangle's -i, -F, -s, and -C switches.  */
+/*  The CDT_ONLY symbol gets rid of all meshing algorithms above and beyond  */
+/*  constrained Delaunay triangulation.  Specifically, the -DCDT_ONLY switch */
+/*  eliminates Triangle's -r, -q, -a, -u, -D, -Y, -S, and -s switches.       */
+/*                                                                           */
+/*  IMPORTANT:  These definitions (TRILIBRARY, REDUCED, CDT_ONLY) must be    */
+/*  made in the makefile or in triangle.c itself.  Putting these definitions */
+/*  in this file (triangle.h) will not create the desired effect.            */
+/*                                                                           */
+/*                                                                           */
+/*  The calling convention for triangulate() follows.                        */
+/*                                                                           */
+/*      void triangulate(triswitches, in, out, vorout)                       */
+/*      char *triswitches;                                                   */
+/*      struct triangulateio *in;                                            */
+/*      struct triangulateio *out;                                           */
+/*      struct triangulateio *vorout;                                        */
+/*                                                                           */
+/*  `triswitches' is a string containing the command line switches you wish  */
+/*  to invoke.  No initial dash is required.  Some suggestions:              */
+/*                                                                           */
+/*  - You'll probably find it convenient to use the `z' switch so that       */
+/*    points (and other items) are numbered from zero.  This simplifies      */
+/*    indexing, because the first item of any type always starts at index    */
+/*    [0] of the corresponding array, whether that item's number is zero or  */
+/*    one.                                                                   */
+/*  - You'll probably want to use the `Q' (quiet) switch in your final code, */
+/*    but you can take advantage of Triangle's printed output (including the */
+/*    `V' switch) while debugging.                                           */
+/*  - If you are not using the `q', `a', `u', `D', `j', or `s' switches,     */
+/*    then the output points will be identical to the input points, except   */
+/*    possibly for the boundary markers.  If you don't need the boundary     */
+/*    markers, you should use the `N' (no nodes output) switch to save       */
+/*    memory.  (If you do need boundary markers, but need to save memory, a  */
+/*    good nasty trick is to set out->pointlist equal to in->pointlist       */
+/*    before calling triangulate(), so that Triangle overwrites the input    */
+/*    points with identical copies.)                                         */
+/*  - The `I' (no iteration numbers) and `g' (.off file output) switches     */
+/*    have no effect when Triangle is compiled with TRILIBRARY defined.      */
+/*                                                                           */
+/*  `in', `out', and `vorout' are descriptions of the input, the output,     */
+/*  and the Voronoi output.  If the `v' (Voronoi output) switch is not used, */
+/*  `vorout' may be NULL.  `in' and `out' may never be NULL.                 */
+/*                                                                           */
+/*  Certain fields of the input and output structures must be initialized,   */
+/*  as described below.                                                      */
+/*                                                                           */
+/*****************************************************************************/
+
+/*****************************************************************************/
+/*                                                                           */
+/*  The `triangulateio' structure.                                           */
+/*                                                                           */
+/*  Used to pass data into and out of the triangulate() procedure.           */
+/*                                                                           */
+/*                                                                           */
+/*  Arrays are used to store points, triangles, markers, and so forth.  In   */
+/*  all cases, the first item in any array is stored starting at index [0].  */
+/*  However, that item is item number `1' unless the `z' switch is used, in  */
+/*  which case it is item number `0'.  Hence, you may find it easier to      */
+/*  index points (and triangles in the neighbor list) if you use the `z'     */
+/*  switch.  Unless, of course, you're calling Triangle from a Fortran       */
+/*  program.                                                                 */
+/*                                                                           */
+/*  Description of fields (except the `numberof' fields, which are obvious): */
+/*                                                                           */
+/*  `pointlist':  An array of point coordinates.  The first point's x        */
+/*    coordinate is at index [0] and its y coordinate at index [1], followed */
+/*    by the coordinates of the remaining points.  Each point occupies two   */
+/*    REALs.                                                                 */
+/*  `pointattributelist':  An array of point attributes.  Each point's       */
+/*    attributes occupy `numberofpointattributes' REALs.                     */
+/*  `pointmarkerlist':  An array of point markers; one int per point.        */
+/*                                                                           */
+/*  `trianglelist':  An array of triangle corners.  The first triangle's     */
+/*    first corner is at index [0], followed by its other two corners in     */
+/*    counterclockwise order, followed by any other nodes if the triangle    */
+/*    represents a nonlinear element.  Each triangle occupies                */
+/*    `numberofcorners' ints.                                                */
+/*  `triangleattributelist':  An array of triangle attributes.  Each         */
+/*    triangle's attributes occupy `numberoftriangleattributes' REALs.       */
+/*  `trianglearealist':  An array of triangle area constraints; one REAL per */
+/*    triangle.  Input only.                                                 */
+/*  `neighborlist':  An array of triangle neighbors; three ints per          */
+/*    triangle.  Output only.                                                */
+/*                                                                           */
+/*  `segmentlist':  An array of segment endpoints.  The first segment's      */
+/*    endpoints are at indices [0] and [1], followed by the remaining        */
+/*    segments.  Two ints per segment.                                       */
+/*  `segmentmarkerlist':  An array of segment markers; one int per segment.  */
+/*                                                                           */
+/*  `holelist':  An array of holes.  The first hole's x and y coordinates    */
+/*    are at indices [0] and [1], followed by the remaining holes.  Two      */
+/*    REALs per hole.  Input only, although the pointer is copied to the     */
+/*    output structure for your convenience.                                 */
+/*                                                                           */
+/*  `regionlist':  An array of regional attributes and area constraints.     */
+/*    The first constraint's x and y coordinates are at indices [0] and [1], */
+/*    followed by the regional attribute at index [2], followed by the       */
+/*    maximum area at index [3], followed by the remaining area constraints. */
+/*    Four REALs per area constraint.  Note that each regional attribute is  */
+/*    used only if you select the `A' switch, and each area constraint is    */
+/*    used only if you select the `a' switch (with no number following), but */
+/*    omitting one of these switches does not change the memory layout.      */
+/*    Input only, although the pointer is copied to the output structure for */
+/*    your convenience.                                                      */
+/*                                                                           */
+/*  `edgelist':  An array of edge endpoints.  The first edge's endpoints are */
+/*    at indices [0] and [1], followed by the remaining edges.  Two ints per */
+/*    edge.  Output only.                                                    */
+/*  `edgemarkerlist':  An array of edge markers; one int per edge.  Output   */
+/*    only.                                                                  */
+/*  `normlist':  An array of normal vectors, used for infinite rays in       */
+/*    Voronoi diagrams.  The first normal vector's x and y magnitudes are    */
+/*    at indices [0] and [1], followed by the remaining vectors.  For each   */
+/*    finite edge in a Voronoi diagram, the normal vector written is the     */
+/*    zero vector.  Two REALs per edge.  Output only.                        */
+/*                                                                           */
+/*                                                                           */
+/*  Any input fields that Triangle will examine must be initialized.         */
+/*  Furthermore, for each output array that Triangle will write to, you      */
+/*  must either provide space by setting the appropriate pointer to point    */
+/*  to the space you want the data written to, or you must initialize the    */
+/*  pointer to NULL, which tells Triangle to allocate space for the results. */
+/*  The latter option is preferable, because Triangle always knows exactly   */
+/*  how much space to allocate.  The former option is provided mainly for    */
+/*  people who need to call Triangle from Fortran code, though it also makes */
+/*  possible some nasty space-saving tricks, like writing the output to the  */
+/*  same arrays as the input.                                                */
+/*                                                                           */
+/*  Triangle will not free() any input or output arrays, including those it  */
+/*  allocates itself; that's up to you.  You should free arrays allocated by */
+/*  Triangle by calling the trifree() procedure defined below.  (By default, */
+/*  trifree() just calls the standard free() library procedure, but          */
+/*  applications that call triangulate() may replace trimalloc() and         */
+/*  trifree() in triangle.c to use specialized memory allocators.)           */
+/*                                                                           */
+/*  Here's a guide to help you decide which fields you must initialize       */
+/*  before you call triangulate().                                           */
+/*                                                                           */
+/*  `in':                                                                    */
+/*                                                                           */
+/*    - `pointlist' must always point to a list of points; `numberofpoints'  */
+/*      and `numberofpointattributes' must be properly set.                  */
+/*      `pointmarkerlist' must either be set to NULL (in which case all      */
+/*      markers default to zero), or must point to a list of markers.  If    */
+/*      `numberofpointattributes' is not zero, `pointattributelist' must     */
+/*      point to a list of point attributes.                                 */
+/*    - If the `r' switch is used, `trianglelist' must point to a list of    */
+/*      triangles, and `numberoftriangles', `numberofcorners', and           */
+/*      `numberoftriangleattributes' must be properly set.  If               */
+/*      `numberoftriangleattributes' is not zero, `triangleattributelist'    */
+/*      must point to a list of triangle attributes.  If the `a' switch is   */
+/*      used (with no number following), `trianglearealist' must point to a  */
+/*      list of triangle area constraints.  `neighborlist' may be ignored.   */
+/*    - If the `p' switch is used, `segmentlist' must point to a list of     */
+/*      segments, `numberofsegments' must be properly set, and               */
+/*      `segmentmarkerlist' must either be set to NULL (in which case all    */
+/*      markers default to zero), or must point to a list of markers.        */
+/*    - If the `p' switch is used without the `r' switch, then               */
+/*      `numberofholes' and `numberofregions' must be properly set.  If      */
+/*      `numberofholes' is not zero, `holelist' must point to a list of      */
+/*      holes.  If `numberofregions' is not zero, `regionlist' must point to */
+/*      a list of region constraints.                                        */
+/*    - If the `p' switch is used, `holelist', `numberofholes',              */
+/*      `regionlist', and `numberofregions' is copied to `out'.  (You can    */
+/*      nonetheless get away with not initializing them if the `r' switch is */
+/*      used.)                                                               */
+/*    - `edgelist', `edgemarkerlist', `normlist', and `numberofedges' may be */
+/*      ignored.                                                             */
+/*                                                                           */
+/*  `out':                                                                   */
+/*                                                                           */
+/*    - `pointlist' must be initialized (NULL or pointing to memory) unless  */
+/*      the `N' switch is used.  `pointmarkerlist' must be initialized       */
+/*      unless the `N' or `B' switch is used.  If `N' is not used and        */
+/*      `in->numberofpointattributes' is not zero, `pointattributelist' must */
+/*      be initialized.                                                      */
+/*    - `trianglelist' must be initialized unless the `E' switch is used.    */
+/*      `neighborlist' must be initialized if the `n' switch is used.  If    */
+/*      the `E' switch is not used and (`in->numberofelementattributes' is   */
+/*      not zero or the `A' switch is used), `elementattributelist' must be  */
+/*      initialized.  `trianglearealist' may be ignored.                     */
+/*    - `segmentlist' must be initialized if the `p' or `c' switch is used,  */
+/*      and the `P' switch is not used.  `segmentmarkerlist' must also be    */
+/*      initialized under these circumstances unless the `B' switch is used. */
+/*    - `edgelist' must be initialized if the `e' switch is used.            */
+/*      `edgemarkerlist' must be initialized if the `e' switch is used and   */
+/*      the `B' switch is not.                                               */
+/*    - `holelist', `regionlist', `normlist', and all scalars may be ignored.*/
+/*                                                                           */
+/*  `vorout' (only needed if `v' switch is used):                            */
+/*                                                                           */
+/*    - `pointlist' must be initialized.  If `in->numberofpointattributes'   */
+/*      is not zero, `pointattributelist' must be initialized.               */
+/*      `pointmarkerlist' may be ignored.                                    */
+/*    - `edgelist' and `normlist' must both be initialized.                  */
+/*      `edgemarkerlist' may be ignored.                                     */
+/*    - Everything else may be ignored.                                      */
+/*                                                                           */
+/*  After a call to triangulate(), the valid fields of `out' and `vorout'    */
+/*  will depend, in an obvious way, on the choice of switches used.  Note    */
+/*  that when the `p' switch is used, the pointers `holelist' and            */
+/*  `regionlist' are copied from `in' to `out', but no new space is          */
+/*  allocated; be careful that you don't free() the same array twice.  On    */
+/*  the other hand, Triangle will never copy the `pointlist' pointer (or any */
+/*  others); new space is allocated for `out->pointlist', or if the `N'      */
+/*  switch is used, `out->pointlist' remains uninitialized.                  */
+/*                                                                           */
+/*  All of the meaningful `numberof' fields will be properly set; for        */
+/*  instance, `numberofedges' will represent the number of edges in the      */
+/*  triangulation whether or not the edges were written.  If segments are    */
+/*  not used, `numberofsegments' will indicate the number of boundary edges. */
+/*                                                                           */
+/*****************************************************************************/
+
+/*Patch for ISSM*/
+#ifndef REAL
+typedef double REAL;
+typedef void VOID;
+#endif
+/*End patch*/
+
+struct triangulateio {
+  REAL *pointlist;                                               /* In / out */
+  REAL *pointattributelist;                                      /* In / out */
+  int *pointmarkerlist;                                          /* In / out */
+  int numberofpoints;                                            /* In / out */
+  int numberofpointattributes;                                   /* In / out */
+
+  int *trianglelist;                                             /* In / out */
+  REAL *triangleattributelist;                                   /* In / out */
+  REAL *trianglearealist;                                         /* In only */
+  int *neighborlist;                                             /* Out only */
+  int numberoftriangles;                                         /* In / out */
+  int numberofcorners;                                           /* In / out */
+  int numberoftriangleattributes;                                /* In / out */
+
+  int *segmentlist;                                              /* In / out */
+  int *segmentmarkerlist;                                        /* In / out */
+  int numberofsegments;                                          /* In / out */
+
+  REAL *holelist;                        /* In / pointer to array copied out */
+  int numberofholes;                                      /* In / copied out */
+
+  REAL *regionlist;                      /* In / pointer to array copied out */
+  int numberofregions;                                    /* In / copied out */
+
+  int *edgelist;                                                 /* Out only */
+  int *edgemarkerlist;            /* Not used with Voronoi diagram; out only */
+  REAL *normlist;                /* Used only with Voronoi diagram; out only */
+  int numberofedges;                                             /* Out only */
+};
+
+#ifdef ANSI_DECLARATORS
+void triangulate(char *, struct triangulateio *, struct triangulateio *,
+                 struct triangulateio *);
+void trifree(VOID *memptr);
+#else /* not ANSI_DECLARATORS */
+void triangulate();
+void trifree();
+#endif /* not ANSI_DECLARATORS */
Index: /issm/trunk/externalpackages/triangle/configs/linux/configure.make
===================================================================
--- /issm/trunk/externalpackages/triangle/configs/linux/configure.make	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/configs/linux/configure.make	(revision 24686)
@@ -0,0 +1,6 @@
+CC=gcc
+CSWITCHES= $(CFLAGS) -DNO_TIMER -fpic
+TRILIBDEFS= -DTRILIBRARY
+STATIC_LIB_EXT=a
+SHARED_LIB_EXT=so
+OBJ_EXT=o
Index: /issm/trunk/externalpackages/triangle/configs/mac/configure.make
===================================================================
--- /issm/trunk/externalpackages/triangle/configs/mac/configure.make	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/configs/mac/configure.make	(revision 24686)
@@ -0,0 +1,6 @@
+CC=gcc
+CSWITCHES= $(CFLAGS) -DNO_TIMER -fpic
+TRILIBDEFS= -DTRILIBRARY
+STATIC_LIB_EXT=a
+SHARED_LIB_EXT=dylib
+OBJ_EXT=o
Index: /issm/trunk/externalpackages/triangle/configs/makefile
===================================================================
--- /issm/trunk/externalpackages/triangle/configs/makefile	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/configs/makefile	(revision 24686)
@@ -0,0 +1,46 @@
+include ./configure.make
+
+ifeq "$(origin CC)" "undefined"
+	CC = cc
+endif
+
+ifeq "$(origin AR)" "undefined"
+	AR = ar
+endif
+
+ifeq "$(origin RANLIB)" "undefined"
+	RANLIB = ranlib
+endif
+
+SOURCES=triangle.c triangle.h
+OBJECTS=triangle.$(OBJ_EXT)
+
+all: libtriangle.$(STATIC_LIB_EXT) libtriangle.$(SHARED_LIB_EXT)
+
+objects: $(OBJECTS)
+
+shared: libtriangle.$(SHARED_LIB_EXT)
+
+static: libtriangle.$(STATIC_LIB_EXT)
+
+triangle.$(OBJ_EXT): $(SOURCES)
+	$(CC) $(CSWITCHES) $(TRILIBDEFS) -c triangle.c
+
+libtriangle.a: $(OBJECTS)
+	$(AR) cr $@ $(OBJECTS)
+	$(RANLIB) $@
+
+libtriangle.dll: $(SOURCES)
+	$(CC) $(CSWITCHES) $(TRILIBDEFS) -shared -o $@ triangle.c
+
+libtriangle.dylib: $(OBJECTS)
+	$(CC) $(CSWITCHES) $(TRILIBDEFS) -dynamiclib -install_name ${INSTALL_DIR}/lib/$@ -o $@ triangle.c
+
+libtriangle.lib: $(OBJECTS)
+	lib -out:$@ $(OBJECTS)
+
+libtriangle.so: $(SOURCES)
+	$(CC) $(CSWITCHES) $(TRILIBDEFS) -shared -o $@ triangle.c
+
+clean:
+	rm -rf *.$(STATIC_LIB_EXT) *.$(SHARED_LIB_EXT) *.$(OBJ_EXT) *.LIB *.LST *.BAK
Index: /issm/trunk/externalpackages/triangle/configs/triangle.h
===================================================================
--- /issm/trunk/externalpackages/triangle/configs/triangle.h	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/configs/triangle.h	(revision 24686)
@@ -0,0 +1,296 @@
+/*****************************************************************************/
+/*                                                                           */
+/*  (triangle.h)                                                             */
+/*                                                                           */
+/*  Include file for programs that call Triangle.                            */
+/*                                                                           */
+/*  Accompanies Triangle Version 1.6                                         */
+/*  July 28, 2005                                                            */
+/*                                                                           */
+/*  Copyright 1996, 2005                                                     */
+/*  Jonathan Richard Shewchuk                                                */
+/*  2360 Woolsey #H                                                          */
+/*  Berkeley, California  94705-1927                                         */
+/*  jrs@cs.berkeley.edu                                                      */
+/*                                                                           */
+/*****************************************************************************/
+
+/*****************************************************************************/
+/*                                                                           */
+/*  How to call Triangle from another program                                */
+/*                                                                           */
+/*                                                                           */
+/*  If you haven't read Triangle's instructions (run "triangle -h" to read   */
+/*  them), you won't understand what follows.                                */
+/*                                                                           */
+/*  Triangle must be compiled into an object file (triangle.o) with the      */
+/*  TRILIBRARY symbol defined (generally by using the -DTRILIBRARY compiler  */
+/*  switch).  The makefile included with Triangle will do this for you if    */
+/*  you run "make trilibrary".  The resulting object file can be called via  */
+/*  the procedure triangulate().                                             */
+/*                                                                           */
+/*  If the size of the object file is important to you, you may wish to      */
+/*  generate a reduced version of triangle.o.  The REDUCED symbol gets rid   */
+/*  of all features that are primarily of research interest.  Specifically,  */
+/*  the -DREDUCED switch eliminates Triangle's -i, -F, -s, and -C switches.  */
+/*  The CDT_ONLY symbol gets rid of all meshing algorithms above and beyond  */
+/*  constrained Delaunay triangulation.  Specifically, the -DCDT_ONLY switch */
+/*  eliminates Triangle's -r, -q, -a, -u, -D, -Y, -S, and -s switches.       */
+/*                                                                           */
+/*  IMPORTANT:  These definitions (TRILIBRARY, REDUCED, CDT_ONLY) must be    */
+/*  made in the makefile or in triangle.c itself.  Putting these definitions */
+/*  in this file (triangle.h) will not create the desired effect.            */
+/*                                                                           */
+/*                                                                           */
+/*  The calling convention for triangulate() follows.                        */
+/*                                                                           */
+/*      void triangulate(triswitches, in, out, vorout)                       */
+/*      char *triswitches;                                                   */
+/*      struct triangulateio *in;                                            */
+/*      struct triangulateio *out;                                           */
+/*      struct triangulateio *vorout;                                        */
+/*                                                                           */
+/*  `triswitches' is a string containing the command line switches you wish  */
+/*  to invoke.  No initial dash is required.  Some suggestions:              */
+/*                                                                           */
+/*  - You'll probably find it convenient to use the `z' switch so that       */
+/*    points (and other items) are numbered from zero.  This simplifies      */
+/*    indexing, because the first item of any type always starts at index    */
+/*    [0] of the corresponding array, whether that item's number is zero or  */
+/*    one.                                                                   */
+/*  - You'll probably want to use the `Q' (quiet) switch in your final code, */
+/*    but you can take advantage of Triangle's printed output (including the */
+/*    `V' switch) while debugging.                                           */
+/*  - If you are not using the `q', `a', `u', `D', `j', or `s' switches,     */
+/*    then the output points will be identical to the input points, except   */
+/*    possibly for the boundary markers.  If you don't need the boundary     */
+/*    markers, you should use the `N' (no nodes output) switch to save       */
+/*    memory.  (If you do need boundary markers, but need to save memory, a  */
+/*    good nasty trick is to set out->pointlist equal to in->pointlist       */
+/*    before calling triangulate(), so that Triangle overwrites the input    */
+/*    points with identical copies.)                                         */
+/*  - The `I' (no iteration numbers) and `g' (.off file output) switches     */
+/*    have no effect when Triangle is compiled with TRILIBRARY defined.      */
+/*                                                                           */
+/*  `in', `out', and `vorout' are descriptions of the input, the output,     */
+/*  and the Voronoi output.  If the `v' (Voronoi output) switch is not used, */
+/*  `vorout' may be NULL.  `in' and `out' may never be NULL.                 */
+/*                                                                           */
+/*  Certain fields of the input and output structures must be initialized,   */
+/*  as described below.                                                      */
+/*                                                                           */
+/*****************************************************************************/
+
+/*****************************************************************************/
+/*                                                                           */
+/*  The `triangulateio' structure.                                           */
+/*                                                                           */
+/*  Used to pass data into and out of the triangulate() procedure.           */
+/*                                                                           */
+/*                                                                           */
+/*  Arrays are used to store points, triangles, markers, and so forth.  In   */
+/*  all cases, the first item in any array is stored starting at index [0].  */
+/*  However, that item is item number `1' unless the `z' switch is used, in  */
+/*  which case it is item number `0'.  Hence, you may find it easier to      */
+/*  index points (and triangles in the neighbor list) if you use the `z'     */
+/*  switch.  Unless, of course, you're calling Triangle from a Fortran       */
+/*  program.                                                                 */
+/*                                                                           */
+/*  Description of fields (except the `numberof' fields, which are obvious): */
+/*                                                                           */
+/*  `pointlist':  An array of point coordinates.  The first point's x        */
+/*    coordinate is at index [0] and its y coordinate at index [1], followed */
+/*    by the coordinates of the remaining points.  Each point occupies two   */
+/*    REALs.                                                                 */
+/*  `pointattributelist':  An array of point attributes.  Each point's       */
+/*    attributes occupy `numberofpointattributes' REALs.                     */
+/*  `pointmarkerlist':  An array of point markers; one int per point.        */
+/*                                                                           */
+/*  `trianglelist':  An array of triangle corners.  The first triangle's     */
+/*    first corner is at index [0], followed by its other two corners in     */
+/*    counterclockwise order, followed by any other nodes if the triangle    */
+/*    represents a nonlinear element.  Each triangle occupies                */
+/*    `numberofcorners' ints.                                                */
+/*  `triangleattributelist':  An array of triangle attributes.  Each         */
+/*    triangle's attributes occupy `numberoftriangleattributes' REALs.       */
+/*  `trianglearealist':  An array of triangle area constraints; one REAL per */
+/*    triangle.  Input only.                                                 */
+/*  `neighborlist':  An array of triangle neighbors; three ints per          */
+/*    triangle.  Output only.                                                */
+/*                                                                           */
+/*  `segmentlist':  An array of segment endpoints.  The first segment's      */
+/*    endpoints are at indices [0] and [1], followed by the remaining        */
+/*    segments.  Two ints per segment.                                       */
+/*  `segmentmarkerlist':  An array of segment markers; one int per segment.  */
+/*                                                                           */
+/*  `holelist':  An array of holes.  The first hole's x and y coordinates    */
+/*    are at indices [0] and [1], followed by the remaining holes.  Two      */
+/*    REALs per hole.  Input only, although the pointer is copied to the     */
+/*    output structure for your convenience.                                 */
+/*                                                                           */
+/*  `regionlist':  An array of regional attributes and area constraints.     */
+/*    The first constraint's x and y coordinates are at indices [0] and [1], */
+/*    followed by the regional attribute at index [2], followed by the       */
+/*    maximum area at index [3], followed by the remaining area constraints. */
+/*    Four REALs per area constraint.  Note that each regional attribute is  */
+/*    used only if you select the `A' switch, and each area constraint is    */
+/*    used only if you select the `a' switch (with no number following), but */
+/*    omitting one of these switches does not change the memory layout.      */
+/*    Input only, although the pointer is copied to the output structure for */
+/*    your convenience.                                                      */
+/*                                                                           */
+/*  `edgelist':  An array of edge endpoints.  The first edge's endpoints are */
+/*    at indices [0] and [1], followed by the remaining edges.  Two ints per */
+/*    edge.  Output only.                                                    */
+/*  `edgemarkerlist':  An array of edge markers; one int per edge.  Output   */
+/*    only.                                                                  */
+/*  `normlist':  An array of normal vectors, used for infinite rays in       */
+/*    Voronoi diagrams.  The first normal vector's x and y magnitudes are    */
+/*    at indices [0] and [1], followed by the remaining vectors.  For each   */
+/*    finite edge in a Voronoi diagram, the normal vector written is the     */
+/*    zero vector.  Two REALs per edge.  Output only.                        */
+/*                                                                           */
+/*                                                                           */
+/*  Any input fields that Triangle will examine must be initialized.         */
+/*  Furthermore, for each output array that Triangle will write to, you      */
+/*  must either provide space by setting the appropriate pointer to point    */
+/*  to the space you want the data written to, or you must initialize the    */
+/*  pointer to NULL, which tells Triangle to allocate space for the results. */
+/*  The latter option is preferable, because Triangle always knows exactly   */
+/*  how much space to allocate.  The former option is provided mainly for    */
+/*  people who need to call Triangle from Fortran code, though it also makes */
+/*  possible some nasty space-saving tricks, like writing the output to the  */
+/*  same arrays as the input.                                                */
+/*                                                                           */
+/*  Triangle will not free() any input or output arrays, including those it  */
+/*  allocates itself; that's up to you.  You should free arrays allocated by */
+/*  Triangle by calling the trifree() procedure defined below.  (By default, */
+/*  trifree() just calls the standard free() library procedure, but          */
+/*  applications that call triangulate() may replace trimalloc() and         */
+/*  trifree() in triangle.c to use specialized memory allocators.)           */
+/*                                                                           */
+/*  Here's a guide to help you decide which fields you must initialize       */
+/*  before you call triangulate().                                           */
+/*                                                                           */
+/*  `in':                                                                    */
+/*                                                                           */
+/*    - `pointlist' must always point to a list of points; `numberofpoints'  */
+/*      and `numberofpointattributes' must be properly set.                  */
+/*      `pointmarkerlist' must either be set to NULL (in which case all      */
+/*      markers default to zero), or must point to a list of markers.  If    */
+/*      `numberofpointattributes' is not zero, `pointattributelist' must     */
+/*      point to a list of point attributes.                                 */
+/*    - If the `r' switch is used, `trianglelist' must point to a list of    */
+/*      triangles, and `numberoftriangles', `numberofcorners', and           */
+/*      `numberoftriangleattributes' must be properly set.  If               */
+/*      `numberoftriangleattributes' is not zero, `triangleattributelist'    */
+/*      must point to a list of triangle attributes.  If the `a' switch is   */
+/*      used (with no number following), `trianglearealist' must point to a  */
+/*      list of triangle area constraints.  `neighborlist' may be ignored.   */
+/*    - If the `p' switch is used, `segmentlist' must point to a list of     */
+/*      segments, `numberofsegments' must be properly set, and               */
+/*      `segmentmarkerlist' must either be set to NULL (in which case all    */
+/*      markers default to zero), or must point to a list of markers.        */
+/*    - If the `p' switch is used without the `r' switch, then               */
+/*      `numberofholes' and `numberofregions' must be properly set.  If      */
+/*      `numberofholes' is not zero, `holelist' must point to a list of      */
+/*      holes.  If `numberofregions' is not zero, `regionlist' must point to */
+/*      a list of region constraints.                                        */
+/*    - If the `p' switch is used, `holelist', `numberofholes',              */
+/*      `regionlist', and `numberofregions' is copied to `out'.  (You can    */
+/*      nonetheless get away with not initializing them if the `r' switch is */
+/*      used.)                                                               */
+/*    - `edgelist', `edgemarkerlist', `normlist', and `numberofedges' may be */
+/*      ignored.                                                             */
+/*                                                                           */
+/*  `out':                                                                   */
+/*                                                                           */
+/*    - `pointlist' must be initialized (NULL or pointing to memory) unless  */
+/*      the `N' switch is used.  `pointmarkerlist' must be initialized       */
+/*      unless the `N' or `B' switch is used.  If `N' is not used and        */
+/*      `in->numberofpointattributes' is not zero, `pointattributelist' must */
+/*      be initialized.                                                      */
+/*    - `trianglelist' must be initialized unless the `E' switch is used.    */
+/*      `neighborlist' must be initialized if the `n' switch is used.  If    */
+/*      the `E' switch is not used and (`in->numberofelementattributes' is   */
+/*      not zero or the `A' switch is used), `elementattributelist' must be  */
+/*      initialized.  `trianglearealist' may be ignored.                     */
+/*    - `segmentlist' must be initialized if the `p' or `c' switch is used,  */
+/*      and the `P' switch is not used.  `segmentmarkerlist' must also be    */
+/*      initialized under these circumstances unless the `B' switch is used. */
+/*    - `edgelist' must be initialized if the `e' switch is used.            */
+/*      `edgemarkerlist' must be initialized if the `e' switch is used and   */
+/*      the `B' switch is not.                                               */
+/*    - `holelist', `regionlist', `normlist', and all scalars may be ignored.*/
+/*                                                                           */
+/*  `vorout' (only needed if `v' switch is used):                            */
+/*                                                                           */
+/*    - `pointlist' must be initialized.  If `in->numberofpointattributes'   */
+/*      is not zero, `pointattributelist' must be initialized.               */
+/*      `pointmarkerlist' may be ignored.                                    */
+/*    - `edgelist' and `normlist' must both be initialized.                  */
+/*      `edgemarkerlist' may be ignored.                                     */
+/*    - Everything else may be ignored.                                      */
+/*                                                                           */
+/*  After a call to triangulate(), the valid fields of `out' and `vorout'    */
+/*  will depend, in an obvious way, on the choice of switches used.  Note    */
+/*  that when the `p' switch is used, the pointers `holelist' and            */
+/*  `regionlist' are copied from `in' to `out', but no new space is          */
+/*  allocated; be careful that you don't free() the same array twice.  On    */
+/*  the other hand, Triangle will never copy the `pointlist' pointer (or any */
+/*  others); new space is allocated for `out->pointlist', or if the `N'      */
+/*  switch is used, `out->pointlist' remains uninitialized.                  */
+/*                                                                           */
+/*  All of the meaningful `numberof' fields will be properly set; for        */
+/*  instance, `numberofedges' will represent the number of edges in the      */
+/*  triangulation whether or not the edges were written.  If segments are    */
+/*  not used, `numberofsegments' will indicate the number of boundary edges. */
+/*                                                                           */
+/*****************************************************************************/
+
+/*Patch for ISSM*/
+#ifndef REAL
+typedef double REAL;
+typedef void VOID;
+#endif
+/*End patch*/
+
+struct triangulateio {
+  REAL *pointlist;                                               /* In / out */
+  REAL *pointattributelist;                                      /* In / out */
+  int *pointmarkerlist;                                          /* In / out */
+  int numberofpoints;                                            /* In / out */
+  int numberofpointattributes;                                   /* In / out */
+
+  int *trianglelist;                                             /* In / out */
+  REAL *triangleattributelist;                                   /* In / out */
+  REAL *trianglearealist;                                         /* In only */
+  int *neighborlist;                                             /* Out only */
+  int numberoftriangles;                                         /* In / out */
+  int numberofcorners;                                           /* In / out */
+  int numberoftriangleattributes;                                /* In / out */
+
+  int *segmentlist;                                              /* In / out */
+  int *segmentmarkerlist;                                        /* In / out */
+  int numberofsegments;                                          /* In / out */
+
+  REAL *holelist;                        /* In / pointer to array copied out */
+  int numberofholes;                                      /* In / copied out */
+
+  REAL *regionlist;                      /* In / pointer to array copied out */
+  int numberofregions;                                    /* In / copied out */
+
+  int *edgelist;                                                 /* Out only */
+  int *edgemarkerlist;            /* Not used with Voronoi diagram; out only */
+  REAL *normlist;                /* Used only with Voronoi diagram; out only */
+  int numberofedges;                                             /* Out only */
+};
+
+#ifdef ANSI_DECLARATORS
+extern "C" void triangulate(char *, struct triangulateio *, struct triangulateio *,
+                 struct triangulateio *);
+void trifree(VOID *memptr);
+#else /* not ANSI_DECLARATORS */
+void triangulate();
+void trifree();
+#endif /* not ANSI_DECLARATORS */
Index: /issm/trunk/externalpackages/triangle/configs/windows/configure.make
===================================================================
--- /issm/trunk/externalpackages/triangle/configs/windows/configure.make	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/configs/windows/configure.make	(revision 24686)
@@ -0,0 +1,6 @@
+CC=cl
+CSWITCHES= $(CFLAGS) -DNO_TIMER
+TRILIBDEFS= -DTRILIBRARY
+STATIC_LIB_EXT=lib
+SHARED_LIB_EXT=dll
+OBJ_EXT=obj
Index: /issm/trunk/externalpackages/triangle/install-javascript.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-javascript.sh	(revision 24685)
+++ 	(revision )
@@ -1,31 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Source emscripten to ensure emcc/em++ compiler are in env
-source $ISSM_DIR/externalpackages/emscripten/install/emsdk_env.sh
-export CC=emcc
-export CXX=em++
-
-#Some cleanup 
-rm -rf install-javascript triangle
-mkdir install-javascript
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/triangle.zip' 'triangle.zip'
-
-#Untar 
-cd install-javascript
-cp ../triangle.zip ./
-unzip triangle.zip
-
-#copy new makefile
-cp ../configs/javascript/configure.make ./
-cp ../configs/javascript/makefile ./
-
-#Patch triangle.h
-patch triangle.h ../triangle.h.patch.js
-
-#Compile triangle
-make
-
-
Index: /issm/trunk/externalpackages/triangle/install-linux-javascript.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-linux-javascript.sh	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/install-linux-javascript.sh	(revision 24686)
@@ -0,0 +1,48 @@
+#!/bin/bash
+set -eu
+
+
+# TODO:
+# - Revisit environment variables (especially EMCC_CFLAGS) once support for
+#	Fortran has been accomplished.
+#
+
+# Constants
+#
+INSTALL_DIR="install-javascript"
+
+# Environment
+#
+export CC=emcc
+export CXX=em++
+export AR=emar
+export RANLIB=emranlib
+#export EMCC_DEBUG=1 # Uncomment to enable debugging
+export EMCC_CFLAGS="-s ERROR_ON_UNDEFINED_SYMBOLS=0" # Required after v1.38.14 to avoid undefined symbol warnings from our Fortran object files being treated as errors
+
+# Source Emscripten environment
+source ${ISSM_DIR}/externalpackages/emscripten/install/emsdk_env.sh
+
+# Cleanup
+rm -rf ${INSTALL_DIR} src
+mkdir ${INSTALL_DIR} ${INSTALL_DIR}/include ${INSTALL_DIR}/share src
+
+# Download source
+${ISSM_DIR}/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/triangle.zip" "triangle.zip"
+
+# Unpack source
+unzip triangle.zip -d src
+
+# Copy customized source files to 'src' directory
+cp configs/makefile src
+cp configs/javascript/configure.make src
+cp configs/javascript/triangle.h src
+
+# Compile
+cd src
+make objects
+
+# Install
+cd ..
+cp src/triangle.o ${INSTALL_DIR}/share
+cp src/triangle.h ${INSTALL_DIR}/include
Index: /issm/trunk/externalpackages/triangle/install-linux-static.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-linux-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/install-linux-static.sh	(revision 24686)
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+INSTALL_DIR="install"
+
+# Cleanup
+rm -rf ${INSTALL_DIR} src
+mkdir ${INSTALL_DIR} ${INSTALL_DIR}/include ${INSTALL_DIR}/lib src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/triangle.zip" "triangle.zip"
+
+# Unpack source
+unzip triangle.zip -d src
+
+# Copy customized source files to 'src' directory
+cp configs/makefile src
+cp configs/triangle.h src
+cp configs/linux/configure.make src
+
+# Compile
+cd src
+make static
+
+# Install
+cd ..
+cp src/libtriangle.* ${INSTALL_DIR}/lib
+cp src/triangle.h ${INSTALL_DIR}/include
+
+# Cleanup
+rm -rf src
Index: /issm/trunk/externalpackages/triangle/install-linux.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-linux.sh	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/install-linux.sh	(revision 24686)
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+INSTALL_DIR="install"
+
+# Cleanup
+rm -rf ${INSTALL_DIR} src
+mkdir ${INSTALL_DIR} ${INSTALL_DIR}/include ${INSTALL_DIR}/lib src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/triangle.zip" "triangle.zip"
+
+# Unpack source
+unzip triangle.zip -d src
+
+# Copy customized source files to 'src' directory
+cp configs/makefile src
+cp configs/triangle.h src
+cp configs/linux/configure.make src
+
+# Compile
+cd src
+make shared
+
+# Install
+cd ..
+cp src/libtriangle.* ${INSTALL_DIR}/lib
+cp src/triangle.h ${INSTALL_DIR}/include
+
+# Cleanup
+rm -rf src
Index: /issm/trunk/externalpackages/triangle/install-linux64.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-linux64.sh	(revision 24685)
+++ 	(revision )
@@ -1,24 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup 
-rm -rf install triangle
-mkdir install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/triangle.zip' 'triangle.zip'
-
-#Untar 
-cd install
-cp ../triangle.zip ./
-unzip triangle.zip
-
-#copy new makefile
-cp ../configs/linux64/configure.make ./
-cp ../makefile ./
-
-#Compile triangle
-make
-
-#Patch triangle.h
-patch triangle.h ../triangle.h.patch
Index: /issm/trunk/externalpackages/triangle/install-mac-static.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-mac-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/install-mac-static.sh	(revision 24686)
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+INSTALL_DIR="install"
+
+# Cleanup
+rm -rf ${INSTALL_DIR} src
+mkdir ${INSTALL_DIR} ${INSTALL_DIR}/include ${INSTALL_DIR}/lib src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/triangle.zip" "triangle.zip"
+
+# Unpack source
+unzip triangle.zip -d src
+
+# Copy customized source files to 'src' directory
+cp configs/makefile src
+cp configs/triangle.h src
+cp configs/mac/configure.make src
+
+# Compile
+cd src
+make static
+
+# Install
+cd ..
+cp src/libtriangle.* ${INSTALL_DIR}/lib
+cp src/triangle.h ${INSTALL_DIR}/include
+
+# Cleanup
+rm -rf src
Index: /issm/trunk/externalpackages/triangle/install-mac.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-mac.sh	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/install-mac.sh	(revision 24686)
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+export INSTALL_DIR="${ISSM_DIR}/externalpackages/triangle/install"
+
+# Cleanup
+rm -rf ${INSTALL_DIR} src
+mkdir ${INSTALL_DIR} ${INSTALL_DIR}/include ${INSTALL_DIR}/lib src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/triangle.zip" "triangle.zip"
+
+# Unpack source
+unzip triangle.zip -d src
+
+# Copy customized source files to 'src' directory
+cp configs/makefile src
+cp configs/triangle.h src
+cp configs/mac/configure.make src
+
+# Compile
+cd src
+make shared
+
+# Install
+cd ..
+cp src/libtriangle.* ${INSTALL_DIR}/lib
+cp src/triangle.h ${INSTALL_DIR}/include
+
+# Cleanup
+rm -rf src
Index: /issm/trunk/externalpackages/triangle/install-macosx32.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-macosx32.sh	(revision 24685)
+++ 	(revision )
@@ -1,24 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup 
-rm -rf install triangle
-mkdir install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/triangle.zip' 'triangle.zip'
-
-#Untar 
-cd install
-cp ../triangle.zip ./
-unzip triangle.zip
-
-#copy new makefile
-cp ../configs/macosx32/configure.make ./
-cp ../makefile ./
-
-#Compile triangle
-make
-
-#Patch triangle.h
-patch triangle.h ../triangle.h.patch
Index: /issm/trunk/externalpackages/triangle/install-macosx64-snowleopard.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-macosx64-snowleopard.sh	(revision 24685)
+++ 	(revision )
@@ -1,24 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup 
-rm -rf install triangle
-mkdir install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/triangle.zip' 'triangle.zip'
-
-#Untar 
-cd install
-cp ../triangle.zip ./
-unzip triangle.zip
-
-#copy new makefile
-cp ../configs/macosx64_snowleopard/configure.make ./
-cp ../makefile ./
-
-#Compile triangle
-make
-
-#Patch triangle.h
-patch triangle.h ../triangle.h.patch
Index: /issm/trunk/externalpackages/triangle/install-macosx64.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-macosx64.sh	(revision 24685)
+++ 	(revision )
@@ -1,24 +1,0 @@
-#!/bin/bash
-set -eu
-
-#Some cleanup 
-rm -rf install triangle
-mkdir install
-
-#Download from ISSM server
-$ISSM_DIR/scripts/DownloadExternalPackage.sh 'https://issm.ess.uci.edu/files/externalpackages/triangle.zip' 'triangle.zip'
-
-#Untar 
-cd install
-cp ../triangle.zip ./
-unzip triangle.zip
-
-#copy new makefile
-cp ../configs/macosx64/configure.make ./
-cp ../makefile ./
-
-#Compile triangle
-make
-
-#Patch triangle.h
-patch triangle.h ../triangle.h.patch
Index: /issm/trunk/externalpackages/triangle/install-windows-static.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-windows-static.sh	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/install-windows-static.sh	(revision 24686)
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+INSTALL_DIR="install"
+
+# Cleanup
+rm -rf ${INSTALL_DIR} src
+mkdir ${INSTALL_DIR} ${INSTALL_DIR}/include ${INSTALL_DIR}/lib src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/triangle.zip" "triangle.zip"
+
+# Unpack source
+unzip triangle.zip -d src
+
+# Copy customized source files to 'src' directory
+cp configs/makefile src
+cp configs/triangle.h src
+cp configs/windows/configure.make src
+
+# Compile
+cd src
+make static
+
+# Install
+cd ..
+cp src/libtriangle.* ${INSTALL_DIR}/lib
+cp src/triangle.h ${INSTALL_DIR}/include
+
+# Cleanup
+rm -rf src
Index: /issm/trunk/externalpackages/triangle/install-windows.sh
===================================================================
--- /issm/trunk/externalpackages/triangle/install-windows.sh	(revision 24686)
+++ /issm/trunk/externalpackages/triangle/install-windows.sh	(revision 24686)
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+INSTALL_DIR="install"
+
+# Cleanup
+rm -rf ${INSTALL_DIR} src
+mkdir ${INSTALL_DIR} ${INSTALL_DIR}/include ${INSTALL_DIR}/lib src
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/triangle.zip" "triangle.zip"
+
+# Unpack source
+unzip triangle.zip -d src
+
+# Copy customized source files to 'src' directory
+cp configs/makefile src
+cp configs/triangle.h src
+cp configs/windows/configure.make src
+
+# Compile
+cd src
+make shared
+
+# Install
+cd ..
+cp src/libtriangle.* ${INSTALL_DIR}/lib
+cp src/triangle.h ${INSTALL_DIR}/include
+
+# Cleanup
+rm -rf src
Index: /issm/trunk/externalpackages/triangle/makefile
===================================================================
--- /issm/trunk/externalpackages/triangle/makefile	(revision 24685)
+++ /issm/trunk/externalpackages/triangle/makefile	(revision 24686)
@@ -1,5 +1,2 @@
-#Adaptation of Triangle makefile to compile only the triangle.c file, 
-#into a standalone library that can then be used to compile mex modules
-
 include ./configure.make
 
@@ -16,17 +13,34 @@
 endif
 
-TRILIBDEFS = -DTRILIBRARY
-
-all: triangle.$(LIB_EXT)
-
+SOURCES=triangle.c triangle.h
 OBJECTS=triangle.$(OBJ_EXT)
 
-triangle.$(LIB_EXT): $(OBJECTS)
-	$(AR) cr  triangle.$(LIB_EXT) $(OBJECTS)
-	$(RANLIB) triangle.$(LIB_EXT)
+all: libtriangle.$(STATIC_LIB_EXT) libtriangle.$(SHARED_LIB_EXT)
 
-triangle.$(OBJ_EXT): triangle.c triangle.h
+objects: $(OBJECTS)
+
+shared: libtriangle.$(SHARED_LIB_EXT)
+
+static: libtriangle.$(STATIC_LIB_EXT)
+
+triangle.$(OBJ_EXT): $(SOURCES)
 	$(CC) $(CSWITCHES) $(TRILIBDEFS) -c triangle.c
 
-clean: 
+libtriangle.a: $(OBJECTS)
+	$(AR) cr $@ $(OBJECTS)
+	$(RANLIB) $@
+
+libtriangle.dll: $(SOURCES)
+	$(CC) $(CSWITCHES) $(TRILIBDEFS) -shared -o $@ triangle.c
+
+libtriangle.dylib: $(OBJECTS)
+	$(CC) $(CSWITCHES) $(TRILIBDEFS) -dynamiclib -install_name $(PREFIX)/$@ -o $@ triangle.c
+
+libtriangle.lib: $(OBJECTS)
+	lib -out:libtriangle.$(STATIC_LIB_EXT) $(OBJECTS)
+
+libtriangle.so: $(SOURCES)
+	$(CC) $(CSWITCHES) $(TRILIBDEFS) -shared -o $@ triangle.c
+
+clean:
 	rm -rf *.$(LIB_EXT) *.$(OBJ_EXT) *.LIB *.LST *.$(OBJ_EXT)bj *.BAK
Index: /issm/trunk/externalpackages/zlib/install-1.2.sh
===================================================================
--- /issm/trunk/externalpackages/zlib/install-1.2.sh	(revision 24686)
+++ /issm/trunk/externalpackages/zlib/install-1.2.sh	(revision 24686)
@@ -0,0 +1,38 @@
+#!/bin/bash
+set -eu
+
+
+# Constants
+#
+VER="1.2.11"
+
+# Download source
+$ISSM_DIR/scripts/DownloadExternalPackage.sh "https://issm.ess.uci.edu/files/externalpackages/zlib-${VER}.tar.gz" "zlib-${VER}.tar.gz"
+
+# Unpack source
+tar -zxvf zlib-$VER.tar.gz
+
+# Cleanup
+rm -rf install src
+mkdir install src
+
+# Move source to 'src' directory
+mv zlib-$VER/* src/
+rm -rf zlib-$VER
+
+# Configure
+cd src
+./configure \
+ 	--prefix="${ISSM_DIR}/externalpackages/zlib/install"
+
+# Compile and install
+if [ $# -eq 0 ]; then
+	make
+	make install
+else
+	make -j $1
+	make -j $1 install
+fi
+
+# Return to initial directory
+cd ..
Index: /issm/trunk/jenkins/jenkins.sh
===================================================================
--- /issm/trunk/jenkins/jenkins.sh	(revision 24685)
+++ /issm/trunk/jenkins/jenkins.sh	(revision 24686)
@@ -16,5 +16,5 @@
 rm -rf $ISSM_DIR/execution/*
 rm -rf $ISSM_DIR/nightlylog
-mkdir  $ISSM_DIR/nightlylog
+mkdir $ISSM_DIR/nightlylog
 
 #Server URI
@@ -31,13 +31,19 @@
 if [ ! -f "$1" ]
 then
-	echo "File $1 not found!" >&2   # Error message to stderr.
+	echo "File $1 not found!" >&2 # Error message to stderr.
 	exit 1
 fi
 
-#Defaults (to avoid -eq: unary operator expected)
-EXAMPLES_TEST=0
+# Initialize test suite variables (to avoid "-eq: unary operator expected")
 MATLAB_TEST=0
 PYTHON_TEST=0
 JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Initialize resource variables (to avoid "i<=: syntax error: operand expected"
+# in for loops)
+NUMCPUS_INSTALL=1
+NUMCPUS_RUN=1
+
 
 #source configuration script
@@ -61,5 +67,5 @@
 	SVN_CURRENT=$SVN_REVISION_1
 	echo "Previous revision number: $SVN_PREVIOUS"
-	echo "Current  revision number: $SVN_CURRENT"
+	echo "Current revision number: $SVN_CURRENT"
 
 	# Get list of changed files
@@ -76,5 +82,5 @@
 	#cat changes | grep 'document_edit' |sed -e 's/document_edit.png/document_edit.png\
 		#/g' | sed -e 's/.*<\/a><\/td><td>\(.*\)<\/td><\/tr>.*/\1/' | grep -v 'document_edit.png' > $ISSM_DIR/TEMP
-	cat changes  | tr " " "\n" | grep trunk | sed -e 's/.*<a>\(.*\)<\/a>.*/\1/' > $ISSM_DIR/TEMP
+	cat changes | tr " " "\n" | grep trunk | sed -e 's/.*<a>\(.*\)<\/a>.*/\1/' > $ISSM_DIR/TEMP
 
 	# Print list of changed files
@@ -140,8 +146,10 @@
 echo $SVN_REVISION_1 > $ISSM_DIR/svn_revision_old
 #}}}
-#Install external packages    (ISSM_EXTERNALPACKAGES){{{
-
-#number of packages:
-NUMPACKAGES=$(($(echo $EXTERNALPACKAGES | wc -w )/2))
+
+## External Packages
+#
+
+# Number of packages
+NUMPACKAGES=$(($(echo ${EXTERNALPACKAGES} | wc -w ) / 2))
 
 #Jenkins xml files for individual packages
@@ -219,10 +227,21 @@
 fi
 
-#Set CXX/CC flags for JS runs after exnteralpackages to avoid conflicts during their compilation
+# Set CXX/CC flags for JS runs after external packages to avoid conflicts during their compilation
+#
+# TODO:
+# - Check a different boolean variable as compiling for JavaScript should be
+#	independent from running JavaScript tests (one should be able to do the
+#	former without having to do the latter).
+# - Revisit environment variables (especially EMCC_CFLAGS) once support for
+#	Fortran has been accomplished.
 CXX_PREVIOUS=$CXX
 CC_PREVIOUS=$CC
 if [ $JAVASCRIPT_TEST -eq 1 ]; then
+	export CC=emcc
 	export CXX=em++
-	export CC=emcc
+	export AR=emar
+	export RANLIB=emranlib
+	#export EMCC_DEBUG=1 # Uncomment to enable debugging
+	export EMCC_CFLAGS="-s ERROR_ON_UNDEFINED_SYMBOLS=0" # Required after v1.38.14 to avoid undefined symbol warnings from our Fortran object files being treated as errors
 	source $ISSM_DIR/externalpackages/emscripten/install/emsdk_env.sh
 fi
@@ -298,6 +317,5 @@
 	warning off %necessary to avoid a log of several Go for parallel runs
 	try,
-	$(if [ "$MATLAB_NROPTIONS" = ""  ]
-	then
+	$(if [ "$MATLAB_NROPTIONS" = "" ]; then
 		echo "runme('output','nightly','rank',$i,'numprocs',$NUMCPUS_RUN);"
 	else
@@ -357,6 +375,6 @@
 for job in `jobs -p`
 do
-echo "Waiting on: $job"
-    wait $job
+	echo "Waiting on: $job"
+	wait $job
 done
 
@@ -394,6 +412,6 @@
 	for job in `jobs -p`
 	do
-	echo "Waiting on: $job"
-	    wait $job
+		echo "Waiting on: $job"
+		wait $job
 	done
 
@@ -421,9 +439,9 @@
 		if [ -d "${dir}" ];
 		then
-		# Some of the examples are incomplete (on purpose). As such, we will have to populate the
-		# missing steps in order to make sure that everything is working.
+		# Some of the examples are incomplete (on purpose). As such, we will
+		# have to populate the missing steps in order to make sure that
+		# everything is working.
 			echo "Testing directory example: $(basename $dir)"
 
-			# Greenland is missing step 8
 			if [ -z "$SED" ];
 			then
@@ -435,47 +453,48 @@
 			if [ "${dir}" == "./Greenland" ];
 			then
+				# Greenland is missing step 8
 				STEP_EIGHT="\n	disp('   Step 8: Plotting exercise');\n	md = loadmodel('.\/Models\/Greenland.HistoricTransient');\n	figure\n	surfmb=[]; for i=2:201; surfmb=[surfmb ...\n		md.results.TransientSolution(i).SmbMassBalance]; end\n	subplot(3,1,1); plot([1:200],mean(surfmb));\n	title('Mean Surface mass balance');\n	vel=[]; for i=2:201; vel=[vel md.results.TransientSolution(i).Vel]; end\n	subplot(3,1,2); plot([1:200],mean(vel));\n	title('Mean Velocity');\n	volume=[]; for i=2:201; volume=[volume md.results.TransientSolution(i).IceVolume]; end\n	subplot(3,1,3); plot([1:200],volume);\n	title('Ice Volume'); xlabel('years');"
 
-				$SED -i.bak 's/steps=\[1\];/steps=\[1:8\];\n\ntry\n/' $FILE
-				$SED -i.bak "s/if any(steps==8)/&${STEP_EIGHT}/" $FILE
+				$SED -i .bak 's/steps=\[1\];/steps=\[1:8\];\n\ntry\n/' $FILE
+				$SED -i .bak "s/if any(steps==8)/&${STEP_EIGHT}/" $FILE
 			elif [ "${dir}" == "./IceBridge" ];
 			then
-				$SED -i.bak 's/steps=\[1\];/steps=\[1:5\];\n\ntry\n/' $FILE
+				$SED -i .bak 's/steps=\[1\];/steps=\[1:5\];\n\ntry\n/' $FILE
 			elif [ "${dir}" == "./IceflowModels" ];
 			then
 				# Almost nothing to this example
-				$SED -i.bak '1 s/^.*$/try\n\n&/' $FILE
+				$SED -i .bak '1 s/^.*$/try\n\n&/' $FILE
 			elif [ "${dir}" == "./ISMIP" ];
 			then
 				# Eight steps... none of which are implmented in the script...
-				$SED -i.bak '1 s/^.*$/try\n\n&/' $FILE
+				$SED -i .bak '1 s/^.*$/try\n\n&/' $FILE
 			elif [ "${dir}" == "./Inversion" ];
 			then
-				$SED -i.bak 's/steps=\[1\];/steps=\[1:4\];\n\ntry\n/' $FILE
+				$SED -i .bak 's/steps=\[1\];/steps=\[1:4\];\n\ntry\n/' $FILE
 			elif [ "${dir}" == "./Jakobshavn" ];
 			then
-				$SED -i.bak 's/steps=\[1\];/steps=\[1:4\];\n\ntry\n/' $FILE
+				$SED -i .bak 's/steps=\[1\];/steps=\[1:4\];\n\ntry\n/' $FILE
 			elif [ "${dir}" == "./Jakobshavn" ];
 			then
-				$SED -i.bak 's/steps=\[1\];/steps=\[1:4\];\n\ntry\n/' $FILE
+				$SED -i .bak 's/steps=\[1\];/steps=\[1:4\];\n\ntry\n/' $FILE
 			elif [ "${dir}" == "./Pig" ];
 			then
 				# Step 6 is needed
 				STEP_SIX="\n disp('Needs work!')"
-				$SED -i.bak 's/steps=\[1\];/steps=\[1:7\];\n!mv DomainOutline.bkp DomainOutline.exp;\n\ntry\n/' $FILE
-				$SED -i.bak "s/if any(steps==6)/&${STEP_SIX}/" $FILE
+				$SED -i .bak 's/steps=\[1\];/steps=\[1:7\];\n!mv DomainOutline.bkp DomainOutline.exp;\n\ntry\n/' $FILE
+				$SED -i .bak "s/if any(steps==6)/&${STEP_SIX}/" $FILE
 			elif [ "${dir}" == "./PigSensitivity" ];
 			then
 				# Step 4 is needed
 				STEP_FOUR="\n disp('Needs work!')"
-				$SED -i.bak 's/steps=\[1\];/steps=\[1:4\];\n\ntry\n/' $FILE
-				$SED -i.bak "s/if any(steps==6)/&${STEP_FOUR}/" $FILE
+				$SED -i .bak 's/steps=\[1\];/steps=\[1:4\];\n\ntry\n/' $FILE
+				$SED -i .bak "s/if any(steps==6)/&${STEP_FOUR}/" $FILE
 			elif [ "${dir}" == "./SquareIceShelf" ];
 			then
 				# Almost nothing to this example
-				$SED -i.bak '1 s/^.*$/try\n\n&/' $FILE
+				$SED -i .bak '1 s/^.*$/try\n\n&/' $FILE
 			elif [ "${dir}" == "./UncertaintyQuantification" ];
 			then
-				$SED -i.bak 's/steps=\[1\];/steps=\[1:7\];\n\ntry\n/' $FILE
+				$SED -i .bak 's/steps=\[1\];/steps=\[1:7\];\n\ntry\n/' $FILE
 			elif [ "${dir}" == "./Data" ];
 			then
@@ -483,5 +502,5 @@
 			else
 				echo "Not implemented yet!"
-				$SED -i.bak '1 s/^.*$/try\n\n&/' $FILE
+				$SED -i .bak '1 s/^.*$/try\n\n&/' $FILE
 			fi
 
@@ -517,29 +536,44 @@
 if [ $MATLAB_TEST -eq 1 ]; then
 	#number tests:
-	numtests=`cat matlab_log.log  | grep "\-\-\-\-\-\-\-\-starting" | wc -l`
-	testlist=`cat matlab_log.log  | grep "\-\-\-\-\-\-\-\-starting" | sed 's/----------------starting://g'  | sed 's/-//g'`
+	numtests=`cat matlab_log.log | grep "\-\-\-\-\-\-\-\-starting" | wc -l`
+	testlist=`cat matlab_log.log | grep "\-\-\-\-\-\-\-\-starting" | sed 's/----------------starting://g' | sed 's/-//g'`
 
 	#look through numtests:
 	for i in `echo $testlist`
 	do
-		juLog  -test=MATLAB-$i -name=Error -error=ERROR awk "/starting:$i/{flag=1;next}/finished/{flag=0} flag{print}" matlab_log.log
-		juLog  -test=MATLAB-$i -name=Failure -error=FAILURE awk "/starting:$i/{flag=1;next}/finished/{flag=0} flag{print}" matlab_log.log
-	done
+		juLog -test=MATLAB-$i -name=Error -error=ERROR awk "/starting:$i/{flag=1;next}/finished/{flag=0} flag{print}" matlab_log.log
+		juLog -test=MATLAB-$i -name=Failure -error=FAILURE awk "/starting:$i/{flag=1;next}/finished/{flag=0} flag{print}" matlab_log.log
+	done
+
+	# Check that MATLAB did not exit in error
+	matlabExitedInError=`grep -E "Activation cannot proceed|license" matlab_log.log | wc -l`
+
+	if [ $matlabExitedInError -ne 0 ]
+	then
+		echo "----------MATLAB exited in error!----------"
+		cat matlab_log.log
+		echo "-----------End of matlab_log.log-----------"
+
+		# Clean up execution directory
+		rm -rf $ISSM_DIR/execution/*
+
+		exit 1
+	fi
 fi
 
 if [ $PYTHON_TEST -eq 1 ]; then
 	#number tests:
-	numtests=`cat python_log.log  | grep "\-\-\-\-\-\-\-\-starting" | wc -l`
-	testlist=`cat python_log.log  | grep "\-\-\-\-\-\-\-\-starting" | sed 's/----------------starting://g'  | sed 's/-//g'`
+	numtests=`cat python_log.log | grep "\-\-\-\-\-\-\-\-starting" | wc -l`
+	testlist=`cat python_log.log | grep "\-\-\-\-\-\-\-\-starting" | sed 's/----------------starting://g' | sed 's/-//g'`
 
 	#look through numtests:
 	for i in `echo $testlist`
 	do
-		juLog  -test=PYTHON-$i -name=Error -error=ERROR awk "/starting:$i/{flag=1;next}/finished/{flag=0} flag{print}" python_log.log
-		juLog  -test=PYTHON-$i -name=Failure -error=FAILURE awk "/starting:$i/{flag=1;next}/finished/{flag=0} flag{print}" python_log.log
+		juLog -test=PYTHON-$i -name=Error -error=ERROR awk "/starting:$i/{flag=1;next}/finished/{flag=0} flag{print}" python_log.log
+		juLog -test=PYTHON-$i -name=Failure -error=FAILURE awk "/starting:$i/{flag=1;next}/finished/{flag=0} flag{print}" python_log.log
 	done
 
 	# Check that Python did not exit in error
-	pythonExitedInError=`grep -E "Traceback|bad interpreter" python_log.log | wc -l`
+	pythonExitedInError=`grep -E "Error|Traceback|bad interpreter" python_log.log | wc -l`
 
 	if [ $pythonExitedInError -ne 0 ]
@@ -562,13 +596,13 @@
 then
 	# Inexplicably, there are backspace chars in the error output that are causing issues
-	$SED -i.bak 's///g' matlab_log_examples.log
-
-	numtests=`cat matlab_log_examples.log  | grep "starting: " | wc -l`
-	testlist=`cat matlab_log_examples.log   | grep "starting: " | sed 's/starting: //'`
+	$SED -i .bak 's///g' matlab_log_examples.log
+
+	numtests=`cat matlab_log_examples.log | grep "starting: " | wc -l`
+	testlist=`cat matlab_log_examples.log | grep "starting: " | sed 's/starting: //'`
 
 	echo "Processing: $numtests"
 	for i in `echo $testlist`
 	do
-		juLog  -test=Example-$i -name=Error -error=FAILURE awk "/starting: $i/{flag=1;next}/finished: $i/{flag=0} flag{print}" matlab_log_examples.log
+		juLog -test=Example-$i -name=Error -error=FAILURE awk "/starting: $i/{flag=1;next}/finished: $i/{flag=0} flag{print}" matlab_log_examples.log
 	done
 fi
Index: /issm/trunk/jenkins/linux64_ross
===================================================================
--- /issm/trunk/jenkins/linux64_ross	(revision 24685)
+++ 	(revision )
@@ -1,66 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--disable-static \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9 -lgfortran" \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi" \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-chaco-dir="$ISSM_DIR/externalpackages/chaco/install" \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-semic-dir=$ISSM_DIR/externalpackages/semic/install \
-	--with-numthreads=4 \
-	--enable-development \
-	--enable-debugging '
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external packages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh
-						cmake        install.sh
-						mpich         install-3.2-linux64.sh
-						petsc         install-3.7-linux64.sh
-						triangle      install-linux64.sh
-						chaco         install.sh
-						m1qn3         install.sh
-						hdf5          install.sh
-						netcdf        install.sh
-						semic         install.sh
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=10
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=5
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-PYTHON_NROPTIONS=""
-MATLAB_NROPTIONS="'exclude',[IdFromString('Dakota')]"
Index: /issm/trunk/jenkins/linux64_ross_ad
===================================================================
--- /issm/trunk/jenkins/linux64_ross_ad	(revision 24685)
+++ 	(revision )
@@ -1,78 +1,0 @@
-#
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-				--disable-static \
-				--without-kriging \
-				--without-kml \
-				--without-GiaIvins \
-				--without-Love \
-				--with-matlab-dir=$MATLAB_PATH \
-				--with-python-dir=/usr \
-				--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
-				--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9 -lgfortran" \
-				--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-				--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi -lmpicxx -lmpifort" \
-				--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-				--with-gsl-dir=$ISSM_DIR/externalpackages/gsl/install \
-				--with-adolc-dir=$ISSM_DIR/externalpackages/adolc/install \
-				--with-numthreads=4 \
-				--enable-development \
-				--enable-debugging'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=1
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-#
-# NOTE: The PETSc libraries are compiled but not used as they conflict with
-#		ADOL-C: PETSc is really just being used as an installer for other
-#		external packages.
-EXTERNALPACKAGES="autotools install.sh
-					 cmake install.sh
-					 mpich install-3.2-linux64.sh
-					 petsc install-3.7-linux64.sh
-					 triangle install-linux64.sh
-					 gsl install-linux64.sh
-					 adolc install.sh
-					 shell2junit install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-# Number of CPUs used in ISSM compilation
-#
-# NOTE: One is usually safer as some packages are very sensitive to parallel
-# 		compilation.
-#
-NUMCPUS_INSTALL=5
-
-# Number of CPUs used in the nightly runs
-NUMCPUS_RUN=4
-
-# Nightly run options
-#
-# See documentation in test/NightlyRun/runme.* for more information.
-#
-# NOTE:
-# - test3010 is skipped because it triggers GEMB module and a subsequent
-#	failure on ampioff configuration (really, likely a segmentation fault)
-#
-MATLAB_NROPTIONS="'benchmark','adolc','id',[3001:3019],'exclude',3010"
-PYTHON_NROPTIONS="--benchmark=adolc -i 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 --exclude=3010"
Index: /issm/trunk/jenkins/linux64_ross_ampi
===================================================================
--- /issm/trunk/jenkins/linux64_ross_ampi	(revision 24685)
+++ 	(revision )
@@ -1,80 +1,0 @@
-#
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-				--disable-static \
-				--without-kriging \
-				--without-kml \
-				--without-GiaIvins \
-				--without-Love \
-				--with-matlab-dir=$MATLAB_PATH \
-				--with-python-dir=/usr \
-				--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
-				--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9 -lgfortran" \
-				--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-				--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi -lmpicxx -lmpifort" \
-				--with-ampi-dir=$ISSM_DIR/externalpackages/adjoinablempi/install \
-				--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-				--with-gsl-dir=$ISSM_DIR/externalpackages/gsl/install \
-				--with-adolc-dir=$ISSM_DIR/externalpackages/adolc/install \
-				--with-numthreads=4 \
-				--enable-development \
-				--enable-debugging'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=1
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-#
-# NOTE: The PETSc libraries are compiled but not used as they conflict with
-#		ADOL-C: PETSc is really just being used as an installer for other
-#		external packages.
-EXTERNALPACKAGES="autotools install.sh
-					 cmake install.sh
-					 mpich install-3.2-linux64.sh
-					 petsc install-3.7-linux64.sh
-					 triangle install-linux64.sh
-					 gsl install-linux64.sh
-					 adjoinablempi install.sh
-					 adolc install-withampi.sh
-					 shell2junit install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-# Number of CPUs used in ISSM compilation
-#
-# NOTE: One is usually safer as some packages are very sensitive to parallel
-# 		compilation.
-#
-NUMCPUS_INSTALL=5
-
-# Number of CPUs used in the nightly runs
-NUMCPUS_RUN=4
-
-# Nightly run options
-#
-# See documentation in test/NightlyRun/runme.* for more information.
-#
-# NOTE:
-# - test3010 is skipped because it triggers GEMB module and a subsequent
-#	failure on ampioff configuration (really, likely a segmentation fault)
-#
-MATLAB_NROPTIONS="'benchmark','adolc','id',[3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3119],'exclude',3010"
-PYTHON_NROPTIONS="--benchmark=adolc -i 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3101 3102 3103 3104 3105 3106 3107 3108 3109 3119 --exclude=3010"
Index: /issm/trunk/jenkins/linux64_ross_codi
===================================================================
--- /issm/trunk/jenkins/linux64_ross_codi	(revision 24685)
+++ 	(revision )
@@ -1,69 +1,0 @@
-#
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR\
-				--without-kriging \
-				--without-kml \
-				--without-GiaIvins \
-				--without-Love \
-				--with-gsl-dir=$ISSM_DIR/externalpackages/gsl/install \
-				--with-matlab-dir=$MATLAB_PATH \
-				--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-				--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpifort -lmpi" \
-				--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-				--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-				--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-				--with-numthreads=4  \
-				--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-				--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9/ -lgfortran" \
-				--with-m1qn3-dir="$ISSM_DIR/externalpackages/m1qn3/install" \
-				--with-codipack-dir="$ISSM_DIR/externalpackages/codipack/install" \
-				--with-medipack-dir="$ISSM_DIR/externalpackages/medipack/install" \
-				--enable-tape-alloc \
-				--enable-development \
-				--enable-debugging '
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools   install.sh
-                  cmake       install.sh
-                  mpich       install-3.2-linux64.sh
-                  petsc       install-3.7-linux64.sh
-                  triangle    install-linux64.sh
-                  gsl         install-linux64.sh
-                  m1qn3       install.sh
-                  medipack    install.sh
-                  codipack    install.sh
-                  shell2junit install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=5
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=1
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-MATLAB_NROPTIONS="'benchmark','all','id',[3015,3119,3480]"
-PYTHON_NROPTIONS="--benchmark='all' -i 3015 3119 3480"
Index: /issm/trunk/jenkins/linux64_ross_dakota
===================================================================
--- /issm/trunk/jenkins/linux64_ross_dakota	(revision 24685)
+++ 	(revision )
@@ -1,69 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR\
-	--disable-static \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi -lmpicxx -lmpifort" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-chaco-dir=$ISSM_DIR/externalpackages/chaco/install \
-	--with-dakota-dir=$ISSM_DIR/externalpackages/dakota/install \
-	--with-boost-dir=$ISSM_DIR/externalpackages/boost/install \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9 -lgfortran" \
-	--with-cxxoptflags="-std=c++11" \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-numthreads=4 \
-	--enable-development \
-	--enable-debugging'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=1
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh
-						cmake        install.sh
-						mpich         install-3.2-linux64.sh
-						petsc         install-3.7-linux64.sh
-						triangle      install-linux64.sh
-						boost         install-1.55-linux.sh
-						dakota        install-6.2-linux64.sh
-						chaco         install.sh
-						m1qn3         install.sh
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=5
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=3
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-PYTHON_NROPTIONS="--exclude 243 701 702 435 --include_name 'Dakota'"
-MATLAB_NROPTIONS="'exclude',[243,701,702,435],'id',[IdFromString('Dakota')]"
Index: /issm/trunk/jenkins/linux64_ross_dakota_static
===================================================================
--- /issm/trunk/jenkins/linux64_ross_dakota_static	(revision 24685)
+++ 	(revision )
@@ -1,67 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR\
-	--enable-standalone-executables \
-	--enable-standalone-modules \
-	--enable-standalone-libraries \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="$ISSM_DIR/externalpackages/mpich/install/lib/libmpifort.a $ISSM_DIR/externalpackages/mpich/install/lib/libmpi.a -lrt -lpthread" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-chaco-dir=$ISSM_DIR/externalpackages/chaco/install \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-boost-dir=$ISSM_DIR/externalpackages/boost/install \
-	--with-dakota-dir=$ISSM_DIR/externalpackages/dakota/install \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu -lgfortran" \
-	--with-numthreads=4 \
-	--with-pic'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=0
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh
-						cmake        install.sh
-						chaco         install.sh
-						mpich         install-3.2-linux64-static.sh
-						m1qn3         install.sh
-						petsc         install-3.7-linux64-static.sh
-						triangle      install-linux64.sh
-						boost         install-1.55-linux64-static.sh
-						dakota        install-6.2-linux64-static.sh
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=4
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=4
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-#PYTHON_NROPTIONS=""
-#MATLAB_NROPTIONS=""
Index: /issm/trunk/jenkins/linux64_ross_gia
===================================================================
--- /issm/trunk/jenkins/linux64_ross_gia	(revision 24685)
+++ 	(revision )
@@ -1,64 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION 
-ISSM_CONFIG='--prefix=$ISSM_DIR\
-	--disable-static \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-python-dir=/usr\
-	--with-python-numpy-dir=/usr/lib/python2.7/dist-packages/numpy\
-	--with-math77-dir=$ISSM_DIR/externalpackages/math77/install \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9/ -lgfortran" \
-	--with-gia=yes \
-	--with-numthreads=4 \
-	--enable-development \
-	--enable-debugging '
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh
-					 	cmake 		  install.sh
-						mpich         install-3.2-linux64.sh    
-						petsc         install-3.7-linux64.sh    
-						triangle      install-linux64.sh        
-						math77        install.sh
-						gmsh          install.sh
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=8
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=1
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-MATLAB_NROPTIONS="'benchmark','all','id',[2001:2100]"
-PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/linux64_ross_iceocean
===================================================================
--- /issm/trunk/jenkins/linux64_ross_iceocean	(revision 24685)
+++ 	(revision )
@@ -1,59 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION 
-ISSM_CONFIG='--prefix=$ISSM_DIR\
-	--disable-static \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9/ -lgfortran" \
-	--with-ocean=yes \
-	--with-numthreads=4 \
-	--enable-development \
-	--enable-debugging '
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh    
-						cmake         install.sh                
-						mpich      	  install-3.2-linux64.sh                
-						petsc         install-3.7-linux64.sh    
-						triangle      install-linux64.sh        
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=8
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=1
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-MATLAB_NROPTIONS="'benchmark','all','id',[4001 4002 4003]"
-PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/linux64_ross_javascript
===================================================================
--- /issm/trunk/jenkins/linux64_ross_javascript	(revision 24685)
+++ 	(revision )
@@ -1,55 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='\
-    --prefix="${ISSM_DIR}" \
-    --disable-shared \
-    --with-javascript \
-    --without-Love \
-    --without-kml \
-    --without-kriging \
-    --with-triangle-dir="${ISSM_DIR}/externalpackages/triangle/install-javascript" \
-    --with-gsl-dir="$ISSM_DIR/externalpackages/gsl/install-javascript"\
-'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=0
-PYTHON_TEST=0
-JAVASCRIPT_TEST=1
-
-# Environment
-#export EMCC_DEBUG=1 # Uncomment to enable debugging
-export EMCC_CFLAGS="-s ERROR_ON_UNDEFINED_SYMBOLS=0" # Required after v1.38.14
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh
-						cmake         install.sh
-						emscripten    install.sh
-						gsl           install-javascript.sh
-						triangle      install-javascript.sh
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=10
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=5
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-#PYTHON_NROPTIONS="--exclude 119 243 514 701 702 703 435 --exclude_name 'Dakota'"
-#MATLAB_NROPTIONS="'exclude',[119,243,514,701,702,435,IdFromString('Dakota')]"
Index: /issm/trunk/jenkins/linux64_ross_python
===================================================================
--- /issm/trunk/jenkins/linux64_ross_python	(revision 24685)
+++ 	(revision )
@@ -1,60 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--disable-static \
-	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9 -lgfortran" \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi -lmpicxx -lmpifort" \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-chaco-dir="$ISSM_DIR/externalpackages/chaco/install" \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-numthreads=4 \
-	--enable-development \
-	--enable-debugging '
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=0
-PYTHON_TEST=1
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external packages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh
-						cmake         install.sh
-						mpich         install-3.2-linux64.sh
-						petsc         install-3.7-linux64.sh
-						triangle      install-linux64.sh
-						chaco         install.sh
-						m1qn3         install.sh
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=10
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=5
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-PYTHON_NROPTIONS="--exclude_name 'Dakota'"
-MATLAB_NROPTIONS=""
Index: /issm/trunk/jenkins/linux64_ross_se
===================================================================
--- /issm/trunk/jenkins/linux64_ross_se	(revision 24685)
+++ 	(revision )
@@ -1,67 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--disable-static \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-python-dir=/usr \
-	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi -lmpicxx -lmpifort" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-boost-dir=$ISSM_DIR/externalpackages/boost/install \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9 -lgfortran" \
-	--with-numthreads=4 \
-	--enable-development \
-	--enable-debugging '
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=1
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external packages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh
-						cmake         install.sh
-						mpich         install-3.2-linux64.sh
-						petsc         install-3.7-linux64.sh
-						triangle      install-linux64.sh
-						boost         install-1.55-linux.sh
-						gshhg         install.sh
-						hdf5          install.sh
-						netcdf        install.sh
-						gmt           install.sh
-						gmsh          install.sh
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=5
-
-#number of cpus used in the nightly runs. (pb of access to all_vertices.txt if more than 1)
-NUMCPUS_RUN=1
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-PYTHON_NROPTIONS="--benchmark=slr"
-MATLAB_NROPTIONS="'benchmark','slr'"
Index: /issm/trunk/jenkins/linux64_ross_static
===================================================================
--- /issm/trunk/jenkins/linux64_ross_static	(revision 24685)
+++ 	(revision )
@@ -1,65 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--enable-standalone-executables \
-	--enable-standalone-modules \
-	--enable-standalone-libraries \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="$ISSM_DIR/externalpackages/mpich/install/lib/libmpifort.a $ISSM_DIR/externalpackages/mpich/install/lib/libmpi.a -lrt -lpthread" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-math77-dir=$ISSM_DIR/externalpackages/math77/install \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu -lgfortran" \
-	--with-numthreads=4 \
-	--with-pic'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=0
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools    install.sh
-						cmake        install.sh
-						mpich        install-3.2-linux64-static.sh
-						m1qn3        install.sh
-						petsc        install-3.7-linux64-static.sh
-						triangle     install-linux64.sh
-						math77        install.sh
-						gmsh          install-static.sh
-						shell2junit  install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=4
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=4
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-##                           FS
-#PYTHON_NROPTIONS=""
-#MATLAB_NROPTIONS=""
Index: /issm/trunk/jenkins/linux64_ross_test
===================================================================
--- /issm/trunk/jenkins/linux64_ross_test	(revision 24685)
+++ 	(revision )
@@ -1,67 +1,0 @@
-#
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/usr/local/MATLAB/R2015a"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR\
-	--disable-static \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-	--with-python-dir=/usr\
-	--with-python-numpy-dir=/usr/lib/python2.7/dist-packages/numpy\
-	--with-chaco-dir="$ISSM_DIR/externalpackages/chaco/install" \
-	--with-dakota-dir=$ISSM_DIR/externalpackages/dakota/install \
-	--with-boost-dir=$ISSM_DIR/externalpackages/boost/install/ \
-	--with-fortran-lib="-L/usr/lib/gcc/x86_64-linux-gnu/4.9/ -lgfortran" \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-numthreads=4 \
-	--enable-development \
-	--enable-debugging '
-
-#PYTHON and MATLAB testing
-#MATLAB_TEST=1
-#PYTHON_TEST=1
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools     install.sh
-						mpich         install-3.2-linux64.sh
-						petsc         install-3.7-linux64.sh
-						triangle      install-linux64.sh
-						boost         install-1.55-linux.sh
-						dakota        install-6.2-linux64.sh
-						chaco         install.sh
-						m1qn3         install.sh
-						shell2junit   install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=6
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=3
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-PYTHON_NROPTIONS="--exclude 243 701 702 435"
-MATLAB_NROPTIONS="'exclude',[243,701,702,435]"
Index: /issm/trunk/jenkins/macosx_pine-island
===================================================================
--- /issm/trunk/jenkins/macosx_pine-island	(revision 24685)
+++ 	(revision )
@@ -1,65 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/Applications/MATLAB_R2015b.app"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install  \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-semic-dir=$ISSM_DIR/externalpackages/semic/install \
-	--with-numthreads=4 \
-	--enable-debugging \
-	--enable-development'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools    install.sh
-						cmake        install.sh
-						mpich        install-3.2-macosx64.sh
-						petsc        install-3.7-macosx64.sh
-						triangle     install-macosx64.sh
-						m1qn3        install.sh
-						semic        install.sh
-						shell2junit  install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-# Number of CPUs used in ISSM compilation
-#
-# NOTE: One is usually safer as some packages are very sensitive to parallel
-# 		compilation.
-#
-NUMCPUS_INSTALL=4
-
-# Number of CPUs used in the nightly runs
-NUMCPUS_RUN=4
-
-# Nightly run options
-#
-# See documentation in test/NightlyRun/runme.* for more information.
-#
-# NOTE:
-#	- test701.m is skipped because it uses full Stokes equations
-#
-MATLAB_NROPTIONS="'exclude',[701,702,703,435,IdFromString('Dakota')]"
-PYTHON_NROPTIONS="--exclude_name 'Dakota'"
Index: /issm/trunk/jenkins/macosx_pine-island_dakota
===================================================================
--- /issm/trunk/jenkins/macosx_pine-island_dakota	(revision 24685)
+++ 	(revision )
@@ -1,66 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/Applications/MATLAB_R2015b.app"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-python-dir=/System/Library/Frameworks/Python.framework/Versions/2.7 \
-	--with-python-numpy-dir=/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install  \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-boost-dir=$ISSM_DIR/externalpackages/boost/install \
-	--with-dakota-dir=$ISSM_DIR/externalpackages/dakota/install \
-	--with-chaco-dir="$ISSM_DIR/externalpackages/chaco/install" \
-	--with-cxxoptflags="-std=c++11" \
-	--with-numthreads=4 \
-	--enable-debugging \
-	--enable-development'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=1
-PYTHON_TEST=1
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools    install.sh
-						cmake 		 install.sh
-						mpich        install-3.2-macosx64.sh
-						chaco        install-macosx64.sh
-						m1qn3        install.sh
-						petsc        install-3.7-macosx64.sh
-						triangle     install-macosx64.sh
-						boost        install-1.55-macosx-el_capitan.sh
-						dakota       install-6.2-macosx64.sh
-						shell2junit  install.sh"
-
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=4
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=4
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"  ERRORS ARE LARGE FOR: 418 420
-PYTHON_NROPTIONS="--exclude 119 243 514 701 702 703 234 235 418 420 --include_name 'Dakota'"
-MATLAB_NROPTIONS="'exclude',[119,243,514,701,702,703,234,235,418,420],'id',[IdFromString('Dakota')]"
Index: /issm/trunk/jenkins/macosx_pine-island_examples
===================================================================
--- /issm/trunk/jenkins/macosx_pine-island_examples	(revision 24685)
+++ /issm/trunk/jenkins/macosx_pine-island_examples	(revision 24686)
@@ -1,28 +1,32 @@
-
 #-------------------------------#
 # 1: ISSM general configuration #
 #-------------------------------#
 
-#MATLAB path
+# MATLAB path
 MATLAB_PATH="/Applications/MATLAB_R2015b.app"
 
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
+# ISSM CONFIGURATION
+ISSM_CONFIG='\
+	--prefix=$ISSM_DIR \
 	--with-matlab-dir=$MATLAB_PATH \
+	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/mpich/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
+	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
+	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
+	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
 	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include  \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install  \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install/ \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
+	--with-chaco-dir=$ISSM_DIR/externalpackages/chaco/install \
 	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
+	--with-semic-dir=$ISSM_DIR/externalpackages/semic/install \
 	--with-numthreads=4 \
 	--enable-debugging \
-	--enable-development'
+	--enable-development \
+'
 
-#PYTHON and MATLAB testing
+# Test suites
 MATLAB_TEST=0
 PYTHON_TEST=0
+JAVASCRIPT_TEST=0
 EXAMPLES_TEST=1
 
@@ -31,12 +35,23 @@
 #-----------------------------------#
 
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools    install.sh
-						cmake        install.sh
-						mpich        install-3.2-macosx64.sh
-						m1qn3        install.sh
-						petsc        install-3.7-macosx64.sh
-						triangle     install-macosx64.sh
-						shell2junit  install.sh"
+# List of external pakages to be installed and their installation scripts
+EXTERNALPACKAGES="
+	autotools	install.sh
+	cmake		install.sh
+	mpich		install-3.3.sh
+	petsc		install-3.7-mac.sh
+	triangle	install-mac.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	hdf5		install-1.10.sh
+	netcdf		install-4.7.sh
+	proj		install-6.2.sh
+	gdal		install-3.0-netcdf.sh
+	gshhg		install.sh
+	gmt			install.sh
+	gmsh		install.sh
+	shell2junit	install.sh
+"
 
 #-----------------#
@@ -44,5 +59,19 @@
 #-----------------#
 
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+#       compilation
+#
 NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+PYTHON_NROPTIONS=""
+MATLAB_NROPTIONS=""
+
Index: /issm/trunk/jenkins/macosx_pine-island_static
===================================================================
--- /issm/trunk/jenkins/macosx_pine-island_static	(revision 24685)
+++ 	(revision )
@@ -1,65 +1,0 @@
-
-#-------------------------------#
-# 1: ISSM general configuration #
-#-------------------------------#
-
-#MATLAB path
-MATLAB_PATH="/Applications/MATLAB_R2015b.app"
-
-#ISSM CONFIGURATION
-ISSM_CONFIG='--prefix=$ISSM_DIR \
-	--disable-static \
-	--enable-standalone-executables \
-	--enable-standalone-libraries \
-	--with-matlab-dir=$MATLAB_PATH \
-	--with-triangle-dir=$ISSM_DIR/externalpackages/triangle/install \
-	--with-mpi-include=$ISSM_DIR/externalpackages/mpich/install/include \
-	--with-mpi-libflags="-L$ISSM_DIR/externalpackages/mpich/install/lib -lmpi -lpmpi -lmpifort -lmpicxx" \
-	--with-petsc-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-scalapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-blas-lapack-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-mumps-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-metis-dir=$ISSM_DIR/externalpackages/petsc/install \
-	--with-m1qn3-dir=$ISSM_DIR/externalpackages/m1qn3/install \
-	--with-math77-dir=$ISSM_DIR/externalpackages/math77/install \
-	--with-fortran-lib="/usr/local/gfortran/lib/libgfortran.a /usr/local/gfortran/lib/libquadmath.a /usr/local/gfortran/lib/gcc/x86_64-apple-darwin14/5.2.0/libgcc.a"'
-
-#PYTHON and MATLAB testing
-MATLAB_TEST=0
-PYTHON_TEST=0
-
-#-----------------------------------#
-# 3: External packages installation #
-#-----------------------------------#
-
-#List of external pakages to be installed and their installation scripts
-EXTERNALPACKAGES="autotools		install.sh
-					cmake		install.sh
-					mpich		install-3.2-macosx64-static.sh
-					m1qn3		install.sh
-					petsc		install-3.7-macosx64-static.sh
-					triangle	install-macosx64.sh
-					math77		install.sh
-					gmt			install-mac-precompiled.sh
-					gmsh		install-mac-precompiled.sh
-					shell2junit	install.sh"
-
-#for SLR we need gmsh to mesh, math77, and gmt (which itself needs gdal and netcdf...)
-#-----------------#
-# 4: test options #
-#-----------------#
-
-#number of cpus used in ISSM installation and compilation (one is usually
-#safer as some packages are very sensitive to parallel compilation)
-NUMCPUS_INSTALL=4
-
-#number of cpus used in the nightly runs.
-NUMCPUS_RUN=4
-
-#Nightly run options. The matlab routine runme.m will be called
-#as follows: runme($MATLAB_NROPTIONS). The options must be understandable
-#by Matlab and runme.m
-#ex: "'id',[101 102 103]"
-##                           bamg mesh   FS
-#PYTHON_NROPTIONS=""
-#MATLAB_NROPTIONS=""
Index: /issm/trunk/jenkins/pine_island-mac
===================================================================
--- /issm/trunk/jenkins/pine_island-mac	(revision 24686)
+++ /issm/trunk/jenkins/pine_island-mac	(revision 24686)
@@ -0,0 +1,74 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+# MATLAB path
+MATLAB_PATH="/Applications/MATLAB_R2018a.app"
+
+# ISSM CONFIGURATION
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="-L/usr/local/Cellar/gcc/9.3.0/lib/gcc/9 -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+# List of external packages to be installed and their installation scripts
+EXTERNALPACKAGES="
+	autotools	install.sh
+	cmake		install.sh
+	petsc		install-3.12-mac.sh
+	triangle	install-mac.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information
+#
+# NOTE:
+# - test701 is skipped because it uses full Stokes equations
+#
+MATLAB_NROPTIONS="'exclude',[435,701,702,703,IdFromString('Dakota')]"
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/pine_island-mac-binaries
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-binaries	(revision 24686)
+++ /issm/trunk/jenkins/pine_island-mac-binaries	(revision 24686)
@@ -0,0 +1,75 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+# MATLAB path
+MATLAB_PATH="/Applications/MATLAB_R2018a.app"
+
+# NOTE:
+# - We can disable dependency tracking in the Autotools because the binaries
+#	should always be a one-time build.
+#
+
+# ISSM CONFIGURATION
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--enable-standalone-executables \
+	--enable-standalone-modules \
+	--enable-standalone-libraries \
+	--disable-dependency-tracking \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="/usr/local/Cellar/gcc/9.3.0/lib/gcc/9/libgfortran.a /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/libquadmath.a /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/gcc/x86_64-apple-darwin15/9.3.0/libgcc.a" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install.sh
+	cmake		install.sh
+	petsc		install-3.12-mac-static.sh
+	triangle	install-mac-static.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=1
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=1
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+PYTHON_NROPTIONS=""
+MATLAB_NROPTIONS=""
Index: /issm/trunk/jenkins/pine_island-mac-binaries-with_dakota
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-binaries-with_dakota	(revision 24686)
+++ /issm/trunk/jenkins/pine_island-mac-binaries-with_dakota	(revision 24686)
@@ -0,0 +1,81 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+# MATLAB path
+MATLAB_PATH="/Applications/MATLAB_R2018a.app"
+
+# NOTE:
+# - We can disable dependency tracking in the Autotools because the binaries
+#	should always be a one-time build.
+#
+
+# ISSM CONFIGURATION
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--enable-standalone-executables \
+	--enable-standalone-modules \
+	--enable-standalone-libraries \
+	--disable-dependency-tracking \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-dir=/System/Library/Frameworks/Python.framework/Versions/2.7 \
+	--with-python-numpy-dir=/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy \
+	--with-fortran-lib="/usr/local/Cellar/gcc/9.3.0/lib/gcc/9/libgfortran.a /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/libquadmath.a /usr/local/Cellar/gcc/9.3.0/lib/gcc/9/gcc/x86_64-apple-darwin15/9.3.0/libgcc.a" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install.sh
+	cmake		install.sh
+	petsc		install-3.12-mac-static.sh
+	boost		install-1.72-mac-static.sh
+	dakota		install-6.2-mac-static.sh
+	triangle	install-mac-static.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=1
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=1
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+PYTHON_NROPTIONS=""
+MATLAB_NROPTIONS=""
Index: /issm/trunk/jenkins/pine_island-mac-dakota
===================================================================
--- /issm/trunk/jenkins/pine_island-mac-dakota	(revision 24686)
+++ /issm/trunk/jenkins/pine_island-mac-dakota	(revision 24686)
@@ -0,0 +1,82 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+# MATLAB path
+MATLAB_PATH="/Applications/MATLAB_R2018a.app"
+
+# ISSM CONFIGURATION
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-dir=/System/Library/Frameworks/Python.framework/Versions/2.7 \
+	--with-python-numpy-dir=/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy \
+	--with-fortran-lib="-L/usr/local/Cellar/gcc/9.3.0/lib/gcc/9 -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+#List of external packages to be installed and their installation scripts
+EXTERNALPACKAGES="
+	autotools	install.sh
+	cmake		install.sh
+	petsc		install-3.12-mac.sh
+	boost		install-1.72-mac.sh
+	dakota		install-6.2-mac.sh
+	triangle	install-mac.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation
+#
+NUMCPUS_INSTALL=4
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE:
+# - See test418.py for reason why it is excluded for now
+# - Error is large for test420
+# - test701 is skipped because it uses full Stokes equations
+#
+MATLAB_NROPTIONS="'exclude',[234,243,420,435,701,702,703],'id',[IdFromString('Dakota')]"
+PYTHON_NROPTIONS="--exclude 234 243 418 420 435 701 702 703 --include_name 'Dakota'"
Index: /issm/trunk/jenkins/ross-debian_linux
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux	(revision 24686)
@@ -0,0 +1,67 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux.sh
+	triangle	install-linux.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+MATLAB_NROPTIONS="'exclude',[IdFromString('Dakota')]"
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-adolc-ampioff
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-adolc-ampioff	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-adolc-ampioff	(revision 24686)
@@ -0,0 +1,78 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--without-kriging \
+	--without-kml \
+	--without-GiaIvins \
+	--without-Love \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-dir=/usr \
+	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include  \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-adolc-dir=${ISSM_DIR}/externalpackages/adolc/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+# NOTE: The PETSc libraries are compiled but not used as they conflict with
+#		ADOL-C: PETSc is really just being used as an installer for other
+#		external packages.
+#
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux.sh
+	triangle	install-linux.sh
+	gsl			install-linux64.sh
+	adolc		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE:
+# - test3010 is skipped because it triggers GEMB module and a subsequent
+#	failure on ampioff configuration (really, likely a segmentation fault)
+#
+MATLAB_NROPTIONS="'benchmark','adolc','id',[3001:3019],'exclude',3010"
+PYTHON_NROPTIONS="--benchmark=adolc -i 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 --exclude=3010"
Index: /issm/trunk/jenkins/ross-debian_linux-adolc-ampion
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-adolc-ampion	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-adolc-ampion	(revision 24686)
@@ -0,0 +1,80 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--without-kriging \
+	--without-kml \
+	--without-GiaIvins \
+	--without-Love \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-dir=/usr \
+	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include  \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-ampi-dir=${ISSM_DIR}/externalpackages/adjoinablempi/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-adolc-dir=${ISSM_DIR}/externalpackages/adolc/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+# NOTE: The PETSc libraries are compiled but not used as they conflict with
+#		ADOL-C: PETSc is really just being used as an installer for other
+#		external packages.
+#
+EXTERNALPACKAGES="
+	autotools		install-debian-linux.sh
+	cmake			install.sh
+	petsc			install-3.12-linux.sh
+	triangle		install-linux.sh
+	gsl				install-linux64.sh
+	adjoinablempi	install.sh
+	adolc			install-withampi.sh
+	shell2junit		install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# PYTHON and MATLAB testing
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE:
+# - test3010 is skipped because it triggers GEMB module and a subsequent
+#	failure on ampion configuration (really, likely a segmentation fault)
+#
+MATLAB_NROPTIONS="'benchmark','adolc','id',[3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3119],'exclude',3010"
+PYTHON_NROPTIONS="--benchmark=adolc -i 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3101 3102 3103 3104 3105 3106 3107 3108 3109 3119 --exclude=3010"
Index: /issm/trunk/jenkins/ross-debian_linux-binaries
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-binaries	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-binaries	(revision 24686)
@@ -0,0 +1,75 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
+# NOTE:
+# - We can disable dependency tracking in the Autotools because the binaries
+#	should always be a one-time build.
+# - libgfortran will not be available in $ISSM_DIR/lib at compile time: it is
+#	copied by packaging script.
+#
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--enable-standalone-executables \
+	--enable-standalone-modules \
+	--enable-standalone-libraries \
+	--disable-dependency-tracking \
+	--with-numthreads=4 \
+	--with-pic \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="-L${ISSM_DIR}/lib -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux-static.sh
+	triangle	install-linux-static.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=1
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=1
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-binaries-with_dakota
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-binaries-with_dakota	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-binaries-with_dakota	(revision 24686)
@@ -0,0 +1,83 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+
+# NOTE:
+# - We can disable dependency tracking in the Autotools because the binaries
+#	should always be a one-time build.
+# - libgfortran will not be available in $ISSM_DIR/lib at compile time: it is
+#	copied by packaging script.
+#
+ISSM_CONFIG='\
+	--prefix="${ISSM_DIR}" \
+	--enable-standalone-executables \
+	--enable-standalone-modules \
+	--enable-standalone-libraries \
+	--disable-dependency-tracking \
+	--with-cxxoptflags="-std=c++11" \
+	--with-numthreads=4 \
+	--with-pic \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-dir=/usr \
+	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-fortran-lib="-L${ISSM_DIR}/lib -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux-static.sh
+	boost		install-1.55-linux-static.sh
+	dakota		install-6.2-linux-static.sh
+	triangle	install-linux-static.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=1
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=1
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-codipack
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-codipack	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-codipack	(revision 24686)
@@ -0,0 +1,76 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--enable-tape-alloc \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--without-kriging \
+	--without-kml \
+	--without-GiaIvins \
+	--without-Love \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+	--with-gsl-dir=${ISSM_DIR}/externalpackages/gsl/install \
+	--with-medipack-dir="${ISSM_DIR}/externalpackages/medipack/install" \
+	--with-codipack-dir="${ISSM_DIR}/externalpackages/codipack/install" \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux.sh
+	triangle	install-linux.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	gsl			install-linux64.sh
+	medipack	install.sh
+	codipack	install.sh
+	shell2junit install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+MATLAB_NROPTIONS="'benchmark','all','id',[3015,3119,3480]"
+PYTHON_NROPTIONS="--benchmark='all' -i 3015 3119 3480"
Index: /issm/trunk/jenkins/ross-debian_linux-dakota
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-dakota	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-dakota	(revision 24686)
@@ -0,0 +1,78 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-cxxoptflags="-std=c++11" \
+	--with-pic \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-dir=/usr \
+	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-dakota-dir=${ISSM_DIR}/externalpackages/dakota/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux.sh
+	boost		install-1.55-linux.sh
+	dakota		install-6.2-linux.sh
+	triangle	install-linux.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE:
+# - See test418.py for reason why it is excluded for now.
+#
+MATLAB_NROPTIONS="'exclude',[243,435,701,702],'id',[IdFromString('Dakota')]"
+PYTHON_NROPTIONS="--exclude 243 418 435 701 702 --include_name 'Dakota'"
Index: /issm/trunk/jenkins/ross-debian_linux-gia
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-gia	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-gia	(revision 24686)
@@ -0,0 +1,67 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-gia=yes \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-dir=/usr \
+	--with-python-numpy-dir=/usr/lib/python2.7/dist-packages/numpy \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-math77-dir=${ISSM_DIR}/externalpackages/math77/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux.sh
+	triangle	install-linux.sh
+	math77		install.sh
+	gmsh		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+MATLAB_NROPTIONS="'benchmark','all','id',[2001:2100]"
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-iceocean
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-iceocean	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-iceocean	(revision 24686)
@@ -0,0 +1,71 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-ocean \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux.sh
+	triangle	install-linux.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=0
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+# NOTE: Currently not including 4003 while Dimitri and Helene work on the
+#		coupling
+#
+MATLAB_NROPTIONS="'benchmark','all','id',[4001,4002]"
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-javascript
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-javascript	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-javascript	(revision 24686)
@@ -0,0 +1,58 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-shared \
+	--enable-development \
+	--enable-debugging \
+	--with-javascript \
+	--without-fortran \
+	--without-GiaIvins \
+	--without-Love \
+	--without-kml \
+	--without-kriging \
+	--with-gsl-dir="${ISSM_DIR}/externalpackages/gsl/install-javascript" \
+	--with-triangle-dir="${ISSM_DIR}/externalpackages/triangle/install-javascript" \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	emscripten	install.sh
+	gsl			install-javascript.sh
+	triangle	install-linux-javascript.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=0
+JAVASCRIPT_TEST=1
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+#		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS=""
Index: /issm/trunk/jenkins/ross-debian_linux-python
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-python	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-python	(revision 24686)
@@ -0,0 +1,67 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-python-dir=/usr \
+	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-chaco-dir=${ISSM_DIR}/externalpackages/chaco/install \
+	--with-m1qn3-dir=${ISSM_DIR}/externalpackages/m1qn3/install \
+	--with-semic-dir=${ISSM_DIR}/externalpackages/semic/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux.sh
+	triangle	install-linux.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=0
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+# 		compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of CPUs used in the nightly runs
+NUMCPUS_RUN=2
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+MATLAB_NROPTIONS=""
+PYTHON_NROPTIONS="--exclude_name 'Dakota'"
Index: /issm/trunk/jenkins/ross-debian_linux-solid_earth
===================================================================
--- /issm/trunk/jenkins/ross-debian_linux-solid_earth	(revision 24686)
+++ /issm/trunk/jenkins/ross-debian_linux-solid_earth	(revision 24686)
@@ -0,0 +1,78 @@
+#--------------------#
+# ISSM Configuration #
+#--------------------#
+
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+ISSM_CONFIG='\
+	--prefix=${ISSM_DIR} \
+	--disable-static \
+	--enable-development \
+	--enable-debugging \
+	--with-numthreads=4 \
+	--with-matlab-dir=${MATLAB_PATH} \
+	--with-python-dir=/usr \
+	--with-python-numpy-dir=/usr/local/lib/python2.7/dist-packages/numpy \
+	--with-fortran-lib="-L/usr/lib/x86_64-linux-gnu -lgfortran" \
+	--with-mpi-include=${ISSM_DIR}/externalpackages/petsc/install/include \
+	--with-mpi-libflags="-L${ISSM_DIR}/externalpackages/petsc/install/lib -lmpi -lmpicxx -lmpifort" \
+	--with-blas-lapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-metis-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-scalapack-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-mumps-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-petsc-dir=${ISSM_DIR}/externalpackages/petsc/install \
+	--with-triangle-dir=${ISSM_DIR}/externalpackages/triangle/install \
+	--with-boost-dir=${ISSM_DIR}/externalpackages/boost/install \
+'
+
+#-------------------#
+# External Packages #
+#-------------------#
+
+EXTERNALPACKAGES="
+	autotools	install-debian-linux.sh
+	cmake		install.sh
+	petsc		install-3.12-linux-solid_earth.sh
+	triangle	install-linux.sh
+	chaco		install.sh
+	m1qn3		install.sh
+	semic		install.sh
+	boost		install-1.55-linux.sh
+	curl		install-7.67.sh
+	netcdf		install-4.7.sh
+	proj		install-6.2.sh
+	gdal		install-3.0-python-netcdf.sh
+	gshhg		install.sh
+	gmt			install-6.0-linux.sh
+	gmsh		install.sh
+	shell2junit	install.sh
+"
+
+#---------#
+# Testing #
+#---------#
+
+# Test suites
+MATLAB_TEST=1
+PYTHON_TEST=1
+JAVASCRIPT_TEST=0
+EXAMPLES_TEST=0
+
+# Number of CPUs used in ISSM compilation
+#
+# NOTE: One is usually safer as some packages are very sensitive to parallel
+#       compilation.
+#
+NUMCPUS_INSTALL=8
+
+# Number of cpus used in the nightly runs
+#
+# NOTE: Possible problem of access to all_vertices.txt if more than 1.
+#
+NUMCPUS_RUN=1
+
+# Nightly run options
+#
+# See documentation in test/NightlyRun/runme.* for more information.
+#
+MATLAB_NROPTIONS="'benchmark','slr'"
+PYTHON_NROPTIONS="--benchmark=slr"
Index: /issm/trunk/jenkins/windows
===================================================================
--- /issm/trunk/jenkins/windows	(revision 24685)
+++ /issm/trunk/jenkins/windows	(revision 24686)
@@ -41,5 +41,5 @@
 						petsc       install-3.6-win10.sh
 						metis       install-4.0-win10.sh
-						triangle    install-win10.sh
+						triangle    install-windows-static.sh
 						shell2junit install.sh"
 
Index: /issm/trunk/m4/issm_options.m4
===================================================================
--- /issm/trunk/m4/issm_options.m4	(revision 24685)
+++ /issm/trunk/m4/issm_options.m4	(revision 24686)
@@ -1,6 +1,19 @@
 dnl ISSM Options
 
+dnl TODO:
+dnl - Check if we need statements such as,
+dnl
+dnl  	  AM_CONDITIONAL([JAVASCRIPT], [test "x${HAVE_JAVASCRIPT}" = "xyes"])
+dnl
+dnl	  when we have already performed a similar check,
+dnl
+dnl  	  if test "x${JAVASCRIPT}" = "xno"; then
+dnl
+dnl - Move library dependency checks from end of file to appropriate places
+dnl   inline
+dnl - Refactor conditionals that test both -d <file> and -f <file>
+dnl
+
 AC_DEFUN([ISSM_OPTIONS],[
-
 	AC_MSG_NOTICE(============================================================================)
 	AC_MSG_NOTICE(=                      Checking ISSM specific options                      =)
@@ -10,16 +23,16 @@
 	dnl Build info{{{
 
-	dnl build date
+	dnl Build date
 	AC_PATH_PROGS(DATE, date)
-	AC_MSG_CHECKING(for build date)
-	if test "$DATE" ; then
+	AC_MSG_CHECKING([for build date])
+	if test "$DATE"; then
 		PACKAGE_DATE=`date`
 	else
 		PACKAGE_DATE="unknown"
 	fi
-	AC_DEFINE_UNQUOTED(PACKAGE_BUILD_DATE,"$PACKAGE_DATE", Build date)
-	AC_MSG_RESULT($PACKAGE_DATE)
-
-	dnl user name
+	AC_DEFINE_UNQUOTED(PACKAGE_BUILD_DATE, "$PACKAGE_DATE", [build date])
+	AC_MSG_RESULT([${PACKAGE_DATE}])
+
+	dnl User name
 	AC_MSG_CHECKING([user name])
 	if test -n "$USER"
@@ -27,131 +40,140 @@
 		user_name="$USER"
 	else
-		if test -n "$LOGNAME"
-		then
-			user_name="$LOGNAME"
+		if test -n "$LOGNAME"; then
+			user_name="$LOGNAME"
 		else
-		   user_name=`(whoami) 2>/dev/null` || user_name=unknown
-		fi
-	fi
-	AC_DEFINE_UNQUOTED(USER_NAME, "$user_name", Build user name)
-	AC_MSG_RESULT($user_name)
+			user_name=`(whoami) 2>/dev/null` || user_name=unknown
+		fi
+	fi
+	AC_DEFINE_UNQUOTED(USER_NAME, "$user_name", [user name])
+	AC_MSG_RESULT([${user_name}])
 
 	AC_MSG_CHECKING([host full OS name and version])
-	dnl normalize some host OS names
+	dnl Normalize some host OS names
 	case ${host_os} in
-	  dnl linux is linux is linux, regardless of RMS.
-	  linux-gnu* | lignux* )	host_os=linux ;;
+		dnl linux is linux is linux, regardless of RMS
+		linux-gnu* | lignux* )	host_os=linux ;;
 	esac
-	AC_DEFINE_UNQUOTED(HOST_OS, "$host_os", Host full OS name and version)
-	AC_MSG_RESULT($host_os)
-
-  AC_MSG_CHECKING([host cpu])
-  AC_DEFINE_UNQUOTED(HOST_CPU, "$host_cpu",Host cpu)
-  AC_MSG_RESULT($host_cpu)
-
-  AC_MSG_CHECKING([vendor])
-  AC_DEFINE_UNQUOTED(HOST_VENDOR, "$host_vendor",Host vendor)
-  AC_MSG_RESULT($host_vendor)
-
-  AC_MSG_CHECKING([host OS name])
-  host_os_name=`echo $host_os | sed 's/\..*//g'`
-  dnl normalize some OS names
-  case ${host_os_name} in
-	dnl linux is linux is linux, regardless of RMS.
-	linux-gnu* | lignux* )	host_os_name=linux ;;
-  esac
-  AC_DEFINE_UNQUOTED(HOST_OS_NAME, "$host_os_name", Host OS name)
-  AC_MSG_RESULT($host_os_name)
-
-	dnl parse out the OS version of the host
-  AC_MSG_CHECKING([host OS version])
-  host_os_version=`echo $host_os | sed 's/^[[^0-9]]*//g'`
-  if test -z "$host_os_version"
-  then
-	host_os_version=`(uname -r) 2>/dev/null` || host_os_version=unknown
-  fi
-  AC_DEFINE_UNQUOTED(HOST_OS_VERSION, "$host_os_version", Host OS version)
-  AC_MSG_RESULT($host_os_version)
-
-
-	dnl figure out host architecture (different than CPU)
-  AC_MSG_CHECKING([host OS architecture])
-  host_arch=`(uname -m) 2>/dev/null` || host_arch=unknown
-  dnl normalize some names
-  case ${host_arch} in
-	sun4* )	host_arch=sun4 ;;
-	sun3x )	host_arch=sun3 ;;
-	sun )	host_arch=`(arch) 2>/dev/null` || host_arch=unknown ;;
-	i?86 )	host_arch=i386 ;; # all x86 should show up as i386
-  esac
-  AC_DEFINE_UNQUOTED(HOST_ARCH, "$host_arch",Host Archictecture)
-  AC_MSG_RESULT($host_arch)
+	AC_DEFINE_UNQUOTED(HOST_OS, "$host_os", [host full OS name and version])
+	AC_MSG_RESULT([${host_os}])
+
+	AC_MSG_CHECKING([host cpu])
+	AC_DEFINE_UNQUOTED(HOST_CPU, "$host_cpu", [host CPU])
+	AC_MSG_RESULT([${host_cpu}])
+
+	AC_MSG_CHECKING([vendor])
+	AC_DEFINE_UNQUOTED(HOST_VENDOR, "$host_vendor", [host vendor])
+	AC_MSG_RESULT([${host_vendor}])
+
+	AC_MSG_CHECKING([host OS name])
+	host_os_name=`echo $host_os | sed 's/\..*//g'`
+	dnl Normalize some OS names
+	case ${host_os_name} in
+		dnl linux is linux is linux, regardless of RMS.
+		linux-gnu* | lignux* )	host_os_name=linux ;;
+	esac
+	AC_DEFINE_UNQUOTED(HOST_OS_NAME, "$host_os_name", [host OS name])
+	AC_MSG_RESULT([${host_os_name}])
+
+	dnl Parse out the OS version of the host
+	AC_MSG_CHECKING([host OS version])
+	host_os_version=`echo $host_os | sed 's/^[[^0-9]]*//g'`
+	if test -z "$host_os_version"; then
+		host_os_version=`(uname -r) 2>/dev/null` || host_os_version=unknown
+	fi
+	AC_DEFINE_UNQUOTED(HOST_OS_VERSION, "$host_os_version", [host OS version])
+	AC_MSG_RESULT([${host_os_version}])
+
+	dnl Determine host architecture (different than CPU)
+	AC_MSG_CHECKING([host OS architecture])
+	host_arch=`(uname -m) 2>/dev/null` || host_arch=unknown
+	dnl Normalize some names
+	case ${host_arch} in
+		sun4* )	host_arch=sun4 ;;
+		sun3x )	host_arch=sun3 ;;
+		sun )	host_arch=`(arch) 2>/dev/null` || host_arch=unknown ;;
+		i?86 )	host_arch=i386 ;; # all x86 should show up as i386
+	esac
+	AC_DEFINE_UNQUOTED(HOST_ARCH, "$host_arch", [host archictecture])
+	AC_MSG_RESULT([${host_arch}])
 
 	dnl }}}
 	dnl Debugging {{{
-	AC_ARG_ENABLE([debugging],                                        dnl feature
-		AS_HELP_STRING([--enable-debugging],[turn debug support on]),  dnl help string
-		[enable_debugging=$enableval],                                 dnl action if given
-		[enable_debugging=no])                                         dnl action if not given
-
+	AC_ARG_ENABLE(
+		[debugging],													dnl feature
+		AS_HELP_STRING([--enable-debugging], [turn debug support on]),	dnl help string
+		[enable_debugging=${enableval}],								dnl action if given
+		[enable_debugging=no]											dnl action if not given
+	)
 	AC_MSG_CHECKING(for debugging support)
-	if test "x$enable_debugging" = xyes; then
-		AC_DEFINE([_ISSM_DEBUG_],[1],[Macro to enable debugging in ISSM])
-	fi
-	AC_MSG_RESULT($enable_debugging)
+	if test "x${enable_debugging}" == "xyes"; then
+		AC_DEFINE([_ISSM_DEBUG_], [1], [Macro to enable debugging in ISSM])
+	fi
+	AC_MSG_RESULT([${enable_debugging}])
 	dnl }}}
 	dnl Development{{{
-	AC_ARG_ENABLE([development],                                      dnl feature
-		AS_HELP_STRING([--enable-development],[turn development on]),  dnl help string
-		[enable_development=$enableval],                                 dnl action if given
-		[enable_development=no])                                      dnl action if not given
-
+	AC_ARG_ENABLE(
+		[development],													dnl feature
+		AS_HELP_STRING([--enable-development], [turn development on]),  dnl help string
+		[enable_development=${enableval}],								dnl action if given
+		[enable_development=no]											dnl action if not given
+	)
 	AC_MSG_CHECKING(for development support)
-	if test "x$enable_development" = xyes; then
-		AC_DEFINE([_DEVELOPMENT_],[1],[Macro to enable development version in ISSM])
-	fi
-	AM_CONDITIONAL([DEVELOPMENT], [test x$enable_development = xyes])
-	AC_MSG_RESULT($enable_development)
-	 dnl }}}
-    dnl Standalone Options {{{
-    AC_ARG_ENABLE([standalone-modules],                                                      dnl feature
-        AS_HELP_STRING([--enable-standalone-modules], [produce standalone modules]),         dnl help string
-        [enable_standalone_modules=$enableval],                                              dnl action if given
-        [enable_standalone_modules=no])                                                      dnl action if not given
+	if test "x${enable_development}" == "xyes"; then
+		AC_DEFINE([_DEVELOPMENT_], [1], [enable development support in ISSM])
+	fi
+	AM_CONDITIONAL([DEVELOPMENT], [test "x${enable_development}" == "xyes"])
+	AC_MSG_RESULT([${enable_development}])
+	dnl }}}
+	dnl Standalone Options {{{
+	AC_ARG_ENABLE(
+		[standalone-modules],															dnl feature
+		AS_HELP_STRING([--enable-standalone-modules], [produce standalone modules]),	dnl help string
+		[enable_standalone_modules=${enableval}],										dnl action if given
+		[enable_standalone_modules=no]													dnl action if not given
+	)
 	AC_MSG_CHECKING(for standalone modules build)
-    AM_CONDITIONAL([STANDALONE_MODULES], [test x$enable_standalone_modules = xyes])
-	AC_MSG_RESULT($enable_standalone_modules)
-
-    AC_ARG_ENABLE([standalone-executables],                                                  dnl feature
-        AS_HELP_STRING([--enable-standalone-executables], [produce standalone executables]), dnl help string
-        [enable_standalone_executables=$enableval],                                          dnl action if given
-        [enable_standalone_executables=no])                                                  dnl action if not given
+	AM_CONDITIONAL([STANDALONE_MODULES], [test "x${enable_standalone_modules}" == "xyes"])
+	AC_MSG_RESULT([${enable_standalone_modules}])
+
+	AC_ARG_ENABLE(
+		[standalone-executables],																dnl feature
+		AS_HELP_STRING([--enable-standalone-executables], [produce standalone executables]),	dnl help string
+		[enable_standalone_executables=${enableval}],											dnl action if given
+		[enable_standalone_executables=no]														dnl action if not given
+	)
 	AC_MSG_CHECKING(for standalone executables build)
-    AM_CONDITIONAL([STANDALONE_EXECUTABLES], [test x$enable_standalone_executables = xyes])
-	AC_MSG_RESULT($enable_standalone_executables)
-
-    AC_ARG_ENABLE([standalone-libraries],                                                    dnl feature
-        AS_HELP_STRING([--enable-standalone-libraries], [produce standalone libraries]),     dnl help string
-        [enable_standalone_libraries=$enableval],                                            dnl action if given
-        [enable_standalone_libraries=no])                                                    dnl action if not given
+	AM_CONDITIONAL([STANDALONE_EXECUTABLES], [test "x${enable_standalone_executables}" == "xyes"])
+	AC_MSG_RESULT([${enable_standalone_executables}])
+
+	AC_ARG_ENABLE(
+		[standalone-libraries],																dnl feature
+		AS_HELP_STRING([--enable-standalone-libraries], [produce standalone libraries]),	dnl help string
+		[enable_standalone_libraries=${enableval}],											dnl action if given
+		[enable_standalone_libraries=no]													dnl action if not given
+	)
 	AC_MSG_CHECKING(for standalone libraries build)
-    AM_CONDITIONAL([STANDALONE_LIBRARIES], [test x$enable_standalone_libraries = xyes])
-	AC_MSG_RESULT($enable_standalone_libraries)
-    dnl }}}
-    dnl Version{{{
-    AC_ARG_ENABLE([version],                                   dnl feature
-    AS_HELP_STRING([--enable-version],[produce libISSM.so.0]), dnl help string
-    [enable_version=$enableval],                               dnl action if given
-    [enable_version=no])                                       dnl action if not given
-    AM_CONDITIONAL([VERSION], [test x$enable_VERSION = xyes])
-    dnl }}}
+	AM_CONDITIONAL([STANDALONE_LIBRARIES], [test "x${enable_standalone_libraries}" == "xyes"])
+	AC_MSG_RESULT([${enable_standalone_libraries}])
+	dnl }}}
+	dnl Version{{{
+	AC_ARG_ENABLE(
+		[version],													dnl feature
+		AS_HELP_STRING([--enable-version], [produce libISSM.so.0]),	dnl help string
+		[enable_version=${enableval}],								dnl action if given
+		[enable_version=no]											dnl action if not given
+	)
+	AM_CONDITIONAL([VERSION], [test "x${enable_version}" == "xyes"])
+	dnl }}}
 	dnl Wrappers build {{{
-	AC_ARG_WITH([wrappers],                                           dnl feature
-	AS_HELP_STRING([--with-wrappers = value],[wrappers compilation]), dnl help string
-	[WRAPPERS_VALUE=$withval],                                        dnl action if given
-	[WRAPPERS_VALUE="yes"])                                           dnl action if not given
+	AC_ARG_WITH(
+		[wrappers],															dnl feature
+		AS_HELP_STRING([--with-wrappers = value], [wrappers compilation]),	dnl help string
+		[WRAPPERS_VALUE=${withval}],										dnl action if given
+		[WRAPPERS_VALUE="yes"]												dnl action if not given
+	)
 	AC_MSG_CHECKING(for wrappers compilation)
-	AM_CONDITIONAL([WRAPPERS], [test x$WRAPPERS_VALUE = xyes])
-	AC_MSG_RESULT($WRAPPERS_VALUE)
+	AM_CONDITIONAL([WRAPPERS], [test "x${WRAPPERS_VALUE}" == "xyes"])
+	AC_MSG_RESULT([${WRAPPERS_VALUE}])
 	dnl }}}
 	dnl Extensions{{{
@@ -162,11 +184,16 @@
 	dnl ISSM's externalpackages
 	dnl vendor{{{
-	AC_ARG_WITH([vendor],                                              dnl feature
-	AS_HELP_STRING([--with-vendor = VENDOR],[vendor name, ex: intel]), dnl help string
-	[VENDOR=$withval],                                                 dnl action if given
-	[VENDOR=""])                                                       dnl action if not given
-	AC_MSG_CHECKING(for vendor compilers)
-	if test -n "$VENDOR"; then
-		if  test $VENDOR = intel-win32; then
+	AC_ARG_WITH(
+		[vendor],															dnl feature
+		AS_HELP_STRING([--with-vendor = VENDOR], [vendor name, ex: intel]),	dnl help string
+		[VENDOR=${withval}],												dnl action if given
+		[VENDOR=""]															dnl action if not given
+	)
+	dnl defaults for host OS related variables
+	IS_MAC=no
+	IS_WINDOWS=no
+	AC_MSG_CHECKING([for vendor compilers])
+	if test -n "${VENDOR}"; then
+		if test "${VENDOR}" == "intel-win32"; then
 			export CC=icl
 			export CXX=icl
@@ -174,5 +201,5 @@
 			export CXXFLAGS="-DWIN32 -D_INTEL_WIN_"
 			IS_WINDOWS=yes
-		elif  test $VENDOR = intel-win7-32; then
+		elif test "${VENDOR}" == "intel-win7-32"; then
 			export CC=cl
 			export CXX=cl
@@ -184,5 +211,5 @@
 			IS_WINDOWS=yes
 			OSLIBS="-Wl,kernel32.lib -Wl,user32.lib -Wl,gdi32.lib -Wl,winspool.lib -Wl,comdlg32.lib -Wl,advapi32.lib -Wl,shell32.lib -Wl,ole32.lib -Wl,oleaut32.lib -Wl,uuid.lib -Wl,odbc32.lib -Wl,odbccp32.lib"
-		elif  test $VENDOR = intel-win7-64; then
+		elif test "${VENDOR}" == "intel-win7-64"; then
 			export CC=cl
 			export CXX=cl
@@ -194,5 +221,5 @@
 			IS_WINDOWS=yes
 			OSLIBS="-Wl,kernel32.lib -Wl,user32.lib -Wl,gdi32.lib -Wl,winspool.lib -Wl,comdlg32.lib -Wl,advapi32.lib -Wl,shell32.lib -Wl,ole32.lib -Wl,oleaut32.lib -Wl,uuid.lib -Wl,odbc32.lib -Wl,odbccp32.lib"
-		elif  test $VENDOR = MSVC-Win64; then
+		elif test "${VENDOR}" == "MSVC-Win64"; then
 			export CC=cl
 			export CXX=cl
@@ -204,5 +231,5 @@
 			IS_WINDOWS=yes
 			OSLIBS="-Wl,kernel32.lib -Wl,user32.lib -Wl,gdi32.lib -Wl,winspool.lib -Wl,comdlg32.lib -Wl,advapi32.lib -Wl,shell32.lib -Wl,ole32.lib -Wl,oleaut32.lib -Wl,uuid.lib -Wl,odbc32.lib -Wl,odbccp32.lib"
-		elif  test $VENDOR = MSVC-Win64-par; then
+		elif test "${VENDOR}" == "MSVC-Win64-par"; then
 			export CC=cl
 			export CXX=cl
@@ -214,411 +241,472 @@
 			IS_WINDOWS=yes
 			OSLIBS="-Wl,kernel32.lib -Wl,user32.lib -Wl,gdi32.lib -Wl,winspool.lib -Wl,comdlg32.lib -Wl,advapi32.lib -Wl,shell32.lib -Wl,ole32.lib -Wl,oleaut32.lib -Wl,uuid.lib -Wl,odbc32.lib -Wl,odbccp32.lib"
-		elif test $VENDOR = intel-linux; then
+		elif test "${VENDOR}" == "intel-linux"; then
 			export CC=icc
 			export CXX=icpc
-			export CFLAGS=" -D_INTEL_LINUX_"
-			export CXXFLAGS=" -D_INTEL_LINUX_"
-		elif test $VENDOR = intel-gp; then
+			export CFLAGS="-D_INTEL_LINUX_"
+			export CXXFLAGS="-D_INTEL_LINUX_"
+		elif test "${VENDOR}" == "intel-gp"; then
 			export CC=icc
 			export CXX=icpc
-			export CFLAGS=" -D_INTEL_LINUX_"
-			export CXXFLAGS=" -D_INTEL_LINUX_"
-		elif test $VENDOR = intel-lonestar; then
+			export CFLAGS="-D_INTEL_LINUX_"
+			export CXXFLAGS="-D_INTEL_LINUX_"
+		elif test "${VENDOR}" == "intel-lonestar"; then
 			export CC=icc
 			export CXX=icpc
-		elif test $VENDOR = intel-aurora; then
+		elif test "${VENDOR}" == "intel-aurora"; then
 			export CC=icc
 			export CXX=icpc
-			export CXXFLAGS=" -O3 -D_INTEL_LINUX_ -DMPICH_IGNORE_CXX_SEEK"
-			export CFLAGS=" -O3 -D_INTEL_LINUX_ -DMPICH_IGNORE_CXX_SEEK"
-		elif test $VENDOR = intel-discover; then
+			export CXXFLAGS="-O3 -D_INTEL_LINUX_ -DMPICH_IGNORE_CXX_SEEK"
+			export CFLAGS="-O3 -D_INTEL_LINUX_ -DMPICH_IGNORE_CXX_SEEK"
+		elif test "${VENDOR}" == "intel-discover"; then
 			export CC=icc
 			export CXX=icpc
-			export CXXFLAGS=" -O3 -D_INTEL_LINUX_ -DMPICH_IGNORE_CXX_SEEK"
-			export CFLAGS=" -O3 -D_INTEL_LINUX_ -DMPICH_IGNORE_CXX_SEEK"
-		elif test $VENDOR = intel-pleiades; then
+			export CXXFLAGS="-O3 -D_INTEL_LINUX_ -DMPICH_IGNORE_CXX_SEEK"
+			export CFLAGS="-O3 -D_INTEL_LINUX_ -DMPICH_IGNORE_CXX_SEEK"
+		elif test "${VENDOR}" == "intel-pleiades"; then
 			export CC=icc
 			export CXX=icpc
-			export CXXFLAGS=" -O3 -D_INTEL_LINUX_ "
-			export CFLAGS=" -O3 -D_INTEL_LINUX_ "
-		elif test $VENDOR = intel-acenet; then
+			export CXXFLAGS="-O3 -D_INTEL_LINUX_"
+			export CFLAGS="-O3 -D_INTEL_LINUX_"
+		elif test "${VENDOR}" == "intel-acenet"; then
 			export CC=icc
 			export CXX=icpc
-			export CXXFLAGS=" -D_INTEL_LINUX_ "
-			export CFLAGS=" -D_INTEL_LINUX_ "
-		elif test $VENDOR = intel-pleiades-gcc; then
+			export CXXFLAGS="-D_INTEL_LINUX_"
+			export CFLAGS="-D_INTEL_LINUX_"
+		elif test "${VENDOR}" == "intel-pleiades-gcc"; then
 			export CC=gcc
 			export CXX=g++
 			export CXXFLAGS="-O3 -march=corei7-avx"
 			export CFLAGS="-O3 -march=corei7-avx"
-        else
-		AC_MSG_ERROR([unknown compiler vendor!])
+		else
+			AC_MSG_ERROR([unknown compiler vendor!])
 		fi
 	fi
 	AC_SUBST([OSLIBS])
-	AC_MSG_RESULT(done)
-	dnl }}}
-	dnl matlab{{{
-
-	dnl 1. See if matlab has been provided
-	AC_ARG_WITH([matlab-dir],                                         dnl feature
-	AS_HELP_STRING([--with-matlab-dir=DIR],[matlab root directory.]), dnl help string
-	[MATLAB_ROOT=$withval],                                           dnl action if given
-	[MATLAB_ROOT="no"])                                               dnl action if not given
-
-	AC_MSG_CHECKING([whether matlab is enabled])
-	if test "x$MATLAB_ROOT" = "xno" ; then
-		 HAVE_MATLAB=no
+	AC_MSG_RESULT([done])
+
+	AC_MSG_CHECKING([if this is a Mac build])
+	dnl TODO: The following test is a POSIX-compliant way of testing for a
+	dnl		  substring, but is not very readable. Perhaps there is a more
+	dnl		  readable method of achieving the same?
+	if test "${host_os#*darwin}" != "${host_os}"; then
+		IS_MAC=yes
+	fi
+	AM_CONDITIONAL([MAC], [test "${IS_MAC}" == "yes"])
+	AC_MSG_RESULT([${IS_MAC}])
+
+	AC_MSG_CHECKING([if this is a Windows build])
+	AM_CONDITIONAL([WINDOWS], [test "x${IS_WINDOWS}" == "xyes"])
+	AC_MSG_RESULT([${IS_WINDOWS}])
+	dnl }}}
+	dnl MATLAB{{{
+
+	dnl See if MATLAB has been provided
+	AC_ARG_WITH(
+		[matlab-dir],														dnl feature
+		AS_HELP_STRING([--with-matlab-dir=DIR], [MATLAB root directory]),	dnl help string
+		[MATLAB_ROOT=${withval}],											dnl action if given
+		[MATLAB_ROOT="no"]													dnl action if not given
+	)
+	AC_MSG_CHECKING([for MATLAB])
+	if test "x${MATLAB_ROOT}" == "xno"; then
+		HAVE_MATLAB=no
 	else
 		HAVE_MATLAB=yes
-		if ! test -d "$MATLAB_ROOT"; then
-		  AC_MSG_ERROR([matlab directory provided ($MATLAB_ROOT) does not exist]);
-		fi
-		if ! test -f "$MATLAB_ROOT/extern/include/mex.h"; then
-			AC_MSG_ERROR([Couldn't find mex.h... check your installation of matlab])
-	   fi
-	fi
-	AC_MSG_RESULT($HAVE_MATLAB)
-	AM_CONDITIONAL([MATLAB], [test x$HAVE_MATLAB = xyes])
-
-	dnl 2. Get Matlab libraries
-	if test "x$HAVE_MATLAB" = "xyes"; then
-
-		AC_DEFINE([_HAVE_MATLAB_],[1],[with matlab in ISSM src])
-
-		dnl 4. get MEXLIB MEXLINK and MEXEXT (experimental) except for windows
-		AC_MSG_CHECKING([matlab's mex compilation flags])
-  		case "${host_os}" in
-  			*cygwin*)
-  				if  test $VENDOR = intel-win7-32; then
-  					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
-               MEXLINK="-Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win32/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
-					MEXEXT=`$MATLAB_ROOT/bin/mexext.bat`
-					MEXEXT=".$MEXEXT"
-  				elif test $VENDOR = intel-win7-64; then
-  					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
-               MEXLINK="-Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win64/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
+		if ! test -d "${MATLAB_ROOT}"; then
+			AC_MSG_ERROR([MATLAB directory provided (${MATLAB_ROOT}) does not exist!]);
+		fi
+		if ! test -f "${MATLAB_ROOT}/extern/include/mex.h"; then
+			AC_MSG_ERROR([Couldn't find mex.h... check your installation of MATLAB])
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_MATLAB}])
+	AM_CONDITIONAL([MATLAB], [test "x${HAVE_MATLAB}" == "xyes"])
+
+	dnl Set variables
+	if test "x${HAVE_MATLAB}" == "xyes"; then
+		AC_DEFINE([_HAVE_MATLAB_], [1], [with MATLAB in ISSM src])
+
+		dnl Set MEXLIB, MEXLINK, and MEXEXT
+		AC_MSG_CHECKING([MATLAB's mex compilation flags])
+
+		dnl NOTE: We know $VENDOR cannot be empty at this point, so no need to
+		dnl		  check again in the following conditionals
+		dnl
+		case "${host_os}" in
+			*cygwin*)
+				if test "${VENDOR}" == "intel-win7-32"; then
+					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
+					MEXLINK="-Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win32/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
+					MEXEXT=`${MATLAB_ROOT}/bin/mexext.bat`
+					MEXEXT=".${MEXEXT}"
+				elif test "${VENDOR}" == "intel-win7-64"; then
+					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
+					MEXLINK="-Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win64/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
 					MEXEXT=".mexw64"
-  				elif test $VENDOR = MSVC-Win64 || test $VENDOR = MSVC-Win64-par; then
-  					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
-               MEXLINK="-Wl,/link -Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win64/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
-  					MATLABINCL="-I`cygpath -m $MATLAB_ROOT/extern/include/`"
+				elif test "${VENDOR}" == "MSVC-Win64" || test "${VENDOR}" == "MSVC-Win64-par"; then
+					MEXLIB="-Wl,libmx.lib -Wl,libmex.lib -Wl,libmat.lib ${OSLIBS} -Wl,libf2cblas.lib -Wl,libf2clapack.lib"
+					MEXLINK="-Wl,/link -Wl,/LIBPATH:`cygpath -m ${MATLAB_ROOT}/extern/lib/win64/microsoft` -Wl,/link -Wl,/EXPORT:mexFunction -Wl,/DLL"
+					MATLABINCL="-I`cygpath -m ${MATLAB_ROOT}/extern/include`"
 					MEXEXT=".mexw64"
-  				fi
-  			;;
-		   *)
-           MATLABINCL="-I$MATLAB_ROOT/extern/include/"
-           MEXLINK=$($MATLAB_ROOT/bin/mex -v 2>&1 < /dev/null | grep LDFLAGS     | sed -e "s/         LDFLAGS            = //g")
-			  MEXLIB=$( $MATLAB_ROOT/bin/mex -v 2>&1 < /dev/null | grep CXXLIBS     | sed -e "s/         CXXLIBS            = //g")
-		     MEXEXT=$( $MATLAB_ROOT/bin/mex -v 2>&1 < /dev/null | grep LDEXTENSION | sed -e "s/         LDEXTENSION        = //g")
+				fi
+			;;
+			*)
+				MATLABINCL="-I${MATLAB_ROOT}/extern/include"
+				MEXLINK=$(${MATLAB_ROOT}/bin/mex -v 2>&1 < /dev/null | grep LDFLAGS | sed -e "s/         LDFLAGS            = //g")
+				MEXLIB=$(${MATLAB_ROOT}/bin/mex -v 2>&1 < /dev/null | grep CXXLIBS | sed -e "s/         CXXLIBS            = //g")
+				MEXEXT=$(${MATLAB_ROOT}/bin/mex -v 2>&1 < /dev/null | grep LDEXTENSION | sed -e "s/         LDEXTENSION        = //g")
 				dnl version 2014 and up
-				if test "x$MEXEXT" = "x" ; then
-					 echo "#include <mex.h>" > conftest.cpp
-					 echo "void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[]){}" >> conftest.cpp
-					 $MATLAB_ROOT/bin/mex -v -lmex conftest.cpp > conftest.tmp 2>&1
-					 rm -f conftest.cpp
-					 MEXLINK=$(cat conftest.tmp | grep LDFLAGS  | sed -e "s/LDFLAGS ://g")
-					 MEXLIB=$( cat conftest.tmp | grep LINKLIBS | sed -e "s/LINKLIBS ://g")
-					 MEXEXT=$( cat conftest.tmp | grep LDEXT    | sed -e "s/LDEXT ://g" | awk '{print $[1]}')
-					 rm -f conftest.tmp
+				if test -z "${MEXEXT}"; then
+					echo "#include <mex.h>" > conftest.cpp
+					echo "void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[]){}" >> conftest.cpp
+					${MATLAB_ROOT}/bin/mex -v -lmex conftest.cpp > conftest.tmp 2>&1
+					rm -f conftest.cpp
+					MEXLINK=$(cat conftest.tmp | grep LDFLAGS | sed -e "s/LDFLAGS ://g")
+					MEXLIB=$(cat conftest.tmp | grep LINKLIBS | sed -e "s/LINKLIBS ://g")
+					MEXEXT=$(cat conftest.tmp | grep LDEXT | sed -e "s/LDEXT ://g" | awk '{print $[1]}')
+					rm -f conftest.tmp
 				fi
 
 				dnl Make sure mexFunction.map is not in MEXLIB to avoid problems with global variables
-				dnl MEXLINK=$(echo $MEXLINK | sed -e "s/,-expo.*mexFunction\\.map\"//g" | sed -e "s/-[[^ ]]*mexFunction\\.map//g")
+				dnl MEXLINK=$(echo ${MEXLINK} | sed -e "s/,-expo.*mexFunction\\.map\"//g" | sed -e "s/-[[^ ]]*mexFunction\\.map//g")
 				MEXLINK="" dnl We actually don't need MEXLINK????
-
-  			;;
-      esac
-		AC_MSG_RESULT(done)
-	   if test "x$MEXEXT" = "x" ; then
-			AC_MSG_ERROR([Couldn't find mex... check your installation of matlab])
-	   fi
+			;;
+		esac
+		AC_MSG_RESULT([done])
+		if test -z "${MEXEXT}"; then
+			AC_MSG_ERROR([Couldn't find mex... check your installation of MATLAB])
+		fi
 
 		AC_SUBST([MATLABINCL])
-		MATLABWRAPPEREXT=$MEXEXT
+		MATLABWRAPPEREXT=${MEXEXT}
 		AC_SUBST([MATLABWRAPPEREXT])
-	   AC_SUBST([MEXLIB])
+		AC_SUBST([MEXLIB])
 		AC_SUBST([MEXLINK])
 	fi
 	dnl }}}
-	dnl windows {{{
-	AC_MSG_CHECKING([Checking if this is a Win build... ])
-	AM_CONDITIONAL([WINDOWS], [test x$IS_WINDOWS = xyes])
-	AC_MSG_RESULT(done)
-	dnl }}}
-	dnl javascript{{{
-	AC_ARG_WITH([javascript],
-	  AS_HELP_STRING([--with-javascript], [compile javascript wrappers? default is no.]),
-	  [JAVASCRIPT=$withval],[JAVASCRIPT="no"])
-
-	dnl Check whether javascript wrappers are desired
-	AC_MSG_CHECKING([for javascript])
-	if test "x$JAVASCRIPT" = "xno" ; then
+	dnl JavaScript{{{
+	AC_ARG_WITH(
+		[javascript],
+		AS_HELP_STRING([--with-javascript], [compile JavaScript wrappers? (default: no)]),
+		[JAVASCRIPT=${withval}],
+		[JAVASCRIPT="no"]
+	)
+	AC_MSG_CHECKING([for JavaScript])
+	if test "x${JAVASCRIPT}" == "xno"; then
 		HAVE_JAVASCRIPT=no
 	else
 		HAVE_JAVASCRIPT=yes
-		AC_DEFINE([_HAVE_JAVASCRIPT_],[1],[with javascript])
-	fi
-	AC_MSG_RESULT($HAVE_JAVASCRIPT)
-	AM_CONDITIONAL([JAVASCRIPT],[test x$HAVE_JAVASCRIPT = xyes])
+		AC_DEFINE([_HAVE_JAVASCRIPT_], [1], [with JavaScript])
+	fi
+	AC_MSG_RESULT([${HAVE_JAVASCRIPT}])
+	AM_CONDITIONAL([JAVASCRIPT], [test "x${HAVE_JAVASCRIPT}" == "xyes"])
 	JAVASCRIPTWRAPPEREXT=.js
 	AC_SUBST([JAVASCRIPTWRAPPEREXT])
-
-	dnl }}}
-	dnl triangle {{{
-	AC_ARG_WITH([triangle-dir],
-			  AS_HELP_STRING([--with-triangle-dir=DIR], [triangle root directory.]),
-			 [TRIANGLE_ROOT=$withval],[TRIANGLE_ROOT="no"])
-
-  dnl Check whether triangle is enabled
+	dnl }}}
+	dnl Triangle {{{
+	AC_ARG_WITH(
+		[triangle-dir],
+		AS_HELP_STRING([--with-triangle-dir=DIR], [Triangle root directory]),
+		[TRIANGLE_ROOT=${withval}],
+		[TRIANGLE_ROOT="no"]
+	)
 	AC_MSG_CHECKING([for triangle])
-	if test "x$TRIANGLE_ROOT" = "xno" ; then
+	if test "x${TRIANGLE_ROOT}" == "xno"; then
 		HAVE_TRIANGLE=no
 	else
 		HAVE_TRIANGLE=yes
-		if ! test -d "$TRIANGLE_ROOT"; then
-			AC_MSG_ERROR([triangle directory provided ($TRIANGLE_ROOT) does not exist]);
-		fi
-		if ! test -f "$TRIANGLE_ROOT/triangle.h" ; then
+		if ! test -d "${TRIANGLE_ROOT}"; then
+			AC_MSG_ERROR([Triangle directory provided (${TRIANGLE_ROOT}) does not exist!]);
+		fi
+		if ! test -f "${TRIANGLE_ROOT}/include/triangle.h"; then
 			AC_MSG_ERROR([Couldn't find triangle.h... check your installation of triangle])
 		fi
 	fi
-	AC_MSG_RESULT($HAVE_TRIANGLE)
-	AM_CONDITIONAL([TRIANGLE],[test x$HAVE_TRIANGLE = xyes])
-
-	dnl library and header files
-	if test "x$HAVE_TRIANGLE" = "xyes"; then
-		TRIANGLEINCL=-I$TRIANGLE_ROOT/
-		case "${host_os}" in
-				*cygwin*)
-				TRIANGLEINCL="/I`cygpath -m $TRIANGLE_ROOT/`"
-				TRIANGLELIB="-Wl,`cygpath -m $TRIANGLE_ROOT/`triangle.lib"
-				;;
-				*linux*)
-				if test "x$HAVE_JAVASCRIPT" = "xyes"; then
-					dnl go to the bit code, not the library.
-					TRIANGLELIB=$TRIANGLE_ROOT/triangle.o
-				else
-					TRIANGLELIB=$TRIANGLE_ROOT/triangle.a
-				fi
-				;;
-				*darwin*)
-				if test "x$HAVE_JAVASCRIPT" = "xyes"; then
-					dnl go to the bit code, not the library.
-					TRIANGLELIB=$TRIANGLE_ROOT/triangle.o
-				else
-					TRIANGLELIB=$TRIANGLE_ROOT/triangle.a
-				fi
-				;;
-			esac
-		AC_DEFINE([_HAVE_TRIANGLE_],[1],[with Triangle in ISSM src])
-		AC_SUBST([TRIANGLEINCL])
-		AC_SUBST([TRIANGLELIB])
-	fi
-	dnl }}}
-	dnl boost{{{
-	AC_ARG_WITH([boost-dir],
-	  AS_HELP_STRING([--with-boost-dir=DIR], [boost root directory.]),
-	  [BOOST_ROOT=$withval],[BOOST_ROOT="no"])
-
-	dnl Check whether boost is enabled
-	AC_MSG_CHECKING([for boost])
-	if test "x$BOOST_ROOT" = "xno" ; then
-		HAVE_BOOST=no
-	else
-		HAVE_BOOST=yes
-		if ! test -d "$BOOST_ROOT"; then
-			AC_MSG_ERROR([boost directory provided ($BOOST_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_BOOST)
-	AM_CONDITIONAL([BOOST],[test x$HAVE_BOOST = xyes])
-
-	dnl library and header files
-	if test "x$HAVE_BOOST" = "xyes"; then
-		BOOSTINCL=-I$BOOST_ROOT/include
-		BOOSTLIB="-L$BOOST_ROOT/lib -lboost_python"
-		AC_DEFINE([_HAVE_BOOST_],[1],[with Boost in ISSM src])
-		AC_SUBST([BOOSTINCL])
-		AC_SUBST([BOOSTLIB])
-	fi
-	dnl }}}
-	dnl dakota{{{
-	AC_ARG_WITH([dakota-dir],
-	  AS_HELP_STRING([--with-dakota-dir=DIR], [dakota root directory.]),
-	  [DAKOTA_ROOT=$withval],[DAKOTA_ROOT="no"])
-
-	dnl Check whether dakota is enabled
-	AC_MSG_CHECKING([for dakota])
-	if test "x$DAKOTA_ROOT" = "xno" ; then
-		HAVE_DAKOTA=no
-	else
-		HAVE_DAKOTA=yes
-		if ! test -d "$DAKOTA_ROOT"; then
-			AC_MSG_ERROR([dakota directory provided ($DAKOTA_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_DAKOTA)
-	AM_CONDITIONAL([DAKOTA],[test x$HAVE_DAKOTA = xyes])
-
-	dnl library and header files
-	if test "x$HAVE_DAKOTA" = "xyes"; then
-		DAKOTAINCL=-I$DAKOTA_ROOT/include
-
-		AC_MSG_CHECKING(for dakota version)
-		if test -f "$DAKOTA_ROOT/VERSION"; then
-		 DAKOTA_VERSION=`cat $DAKOTA_ROOT/VERSION | grep 'DAKOTA Version' | sed 's/.*DAKOTA Version //' | sed 's/ .*//' `
-		else if test -f "$DAKOTA_ROOT/../src/src/CommandLineHandler.C"; then
-		 DAKOTA_VERSION=`cat $DAKOTA_ROOT/../src/src/CommandLineHandler.C | grep 'DAKOTA version' | grep 'release' | grep -v // | sed 's/.*DAKOTA version //' | sed 's/ .*//' `
-		else if test -f "$DAKOTA_ROOT/../src/src/CommandLineHandler.cpp"; then
-		 DAKOTA_VERSION=`cat $DAKOTA_ROOT/../src/src/CommandLineHandler.cpp | grep 'DAKOTA version' | grep 'release' | grep -v // | sed 's/.*DAKOTA version //' | sed 's/ .*//' `
-		else
-		 AC_MSG_ERROR([Dakota CommandLineHandler.C or CommandLineHandler.cpp file not found to determine DAKOTA_VERSION!]);
-		fi
-		fi
-		fi
-		AC_MSG_RESULT($DAKOTA_VERSION)
-		AC_DEFINE_UNQUOTED([_DAKOTA_VERSION_],"$DAKOTA_VERSION",[Dakota version number])
-
-		DAKOTAFLAGS=""
-		dnl TODO: Should we also be checking if HAVE_BOOST before adding boost libs?
+	AC_MSG_RESULT([${HAVE_TRIANGLE}])
+	AM_CONDITIONAL([TRIANGLE], [test "x${HAVE_TRIANGLE}" == "xyes"])
+
+	dnl Triangle libraries and header files
+	if test "x${HAVE_TRIANGLE}" == "xyes"; then
+		TRIANGLEINCL=-I${TRIANGLE_ROOT}/include
 		case "${host_os}" in
 			*cygwin*)
-				if test x$DAKOTA_VERSION = x5.1 || test x$DAKOTA_VERSION = x5.2; then
-					DAKOTALIB="-L$DAKOTA_ROOT/lib -L$BOOST_ROOT/lib -ldakota -lteuchos -lpecos -llhs -lsparsegrid -lsurfpack -lconmin -lddace -lfsudace -ljega -lcport -loptpp -lpsuade -lncsuopt -lcolin -linterfaces -lmomh -lscolib -lpebbl -ltinyxml -lutilib -l3po -lhopspack -lnidr -lamplsolver -lboost_signals -lboost_regex -lboost_filesystem"
-				else if test x$DAKOTA_VERSION = x6.1 || test x$DAKOTA_VERSION = x6.2; then
-				   DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_COLINY -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_JEGA -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
-					DAKOTALIB="-L$DAKOTA_ROOT/lib -L$BOOST_ROOT/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -lscolib -ljega_fe -llhs -lpebbl -lcolin -linterfaces -llhs_mods -lmoga -loptpp -lsoga -lsurfpack -lutilib -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -l3po -lamplsolver -lcport -ldfftpack -leutils -lfsudace -lhopspack -ljega -lnidr -lpecos -lpsuade -ltinyxml -lutilities -lsparsegrid -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
-					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H],[1],[disabling DAKOTA_CONFIG_H])
-					AC_DEFINE([DAKOTA_HAVE_MPI],[1],[enabling parallel MPI])
+				TRIANGLEINCL="/I`cygpath -m ${TRIANGLE_ROOT}/include`"
+				TRIANGLELIB="-Wl,`cygpath -m ${TRIANGLE_ROOT}/lib/libtriangle.lib`"
+			;;
+			*darwin*)
+				if test "x${HAVE_JAVASCRIPT}" == "xyes"; then
+					dnl Link to the object file, not the library
+					TRIANGLELIB=${TRIANGLE_ROOT}/share/triangle.o
 				else
-					AC_MSG_ERROR([Dakota version not found or version ($DAKOTA_VERSION) not supported!]);
-				fi
+					TRIANGLELIB="-L${TRIANGLE_ROOT}/lib -ltriangle"
 				fi
 			;;
 			*linux*)
-				if test x$DAKOTA_VERSION = x5.1 || test x$DAKOTA_VERSION = x5.2; then
-					DAKOTALIB="-L$DAKOTA_ROOT/lib -ldakota -lteuchos -lpecos -llhs -lsparsegrid -lsurfpack -lconmin -lddace -lfsudace -ljega -lcport -loptpp -lpsuade -lncsuopt -lcolin -linterfaces -lmomh -lscolib -lpebbl -ltinyxml -lutilib -l3po -lhopspack -lnidr -lamplsolver -lboost_signals -lboost_regex -lboost_filesystem -lboost_system -ldl"
-				else if test x$DAKOTA_VERSION = x5.3 || test x$DAKOTA_VERSION = x5.3.1; then
+				if test "x${HAVE_JAVASCRIPT}" == "xyes"; then
+					dnl Link to the object file, not the library
+					TRIANGLELIB=${TRIANGLE_ROOT}/share/triangle.o
+				else
+					TRIANGLELIB="-L${TRIANGLE_ROOT}/lib -ltriangle"
+				fi
+			;;
+		esac
+		AC_DEFINE([_HAVE_TRIANGLE_], [1], [with Triangle in ISSM src])
+		AC_SUBST([TRIANGLEINCL])
+		AC_SUBST([TRIANGLELIB])
+	fi
+	dnl }}}
+	dnl Boost{{{
+	AC_ARG_WITH(
+		[boost-dir],
+		AS_HELP_STRING([--with-boost-dir=DIR], [Boost root directory]),
+		[BOOST_ROOT=${withval}],
+		[BOOST_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for Boost])
+	if test "x${BOOST_ROOT}" == "xno"; then
+		HAVE_BOOST=no
+	else
+		HAVE_BOOST=yes
+		if ! test -d "${BOOST_ROOT}"; then
+			AC_MSG_ERROR([Boost directory provided (${BOOST_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_BOOST}])
+	AM_CONDITIONAL([BOOST], [test "x${HAVE_BOOST}" == "xyes"])
+
+	dnl Boost libraries and header files
+	if test "x${HAVE_BOOST}" == "xyes"; then
+		BOOSTINCL="-I${BOOST_ROOT}/include"
+		#BOOSTLIB="-L$BOOST_ROOT/lib -lboost_python"
+		AC_MSG_CHECKING(for Boost version)
+		BOOST_VERSION=`cat ${BOOST_ROOT}/include/boost/version.hpp | grep "#define BOOST_VERSION " | sed 's/.*BOOST_VERSION //'`
+		BOOST_VERSION_MAJOR=`expr ${BOOST_VERSION} / 100000`
+		BOOST_VERSION_MINOR=`expr ${BOOST_VERSION} / 100 % 1000`
+		AC_MSG_RESULT([${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}])
+		AC_DEFINE([_HAVE_BOOST_], [1], [with Boost in ISSM src])
+		AC_SUBST([BOOSTINCL])
+		AC_SUBST([BOOSTLIB])
+	fi
+	dnl }}}
+	dnl Dakota{{{
+	AC_ARG_WITH(
+		[dakota-dir],
+		AS_HELP_STRING([--with-dakota-dir=DIR], [Dakota root directory]),
+		[DAKOTA_ROOT=${withval}],
+		[DAKOTA_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for Dakota])
+	if test "x${DAKOTA_ROOT}" == "xno"; then
+		HAVE_DAKOTA=no
+	else
+		HAVE_DAKOTA=yes
+		if ! test -d "${DAKOTA_ROOT}"; then
+			AC_MSG_ERROR([Dakota directory provided (${DAKOTA_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_DAKOTA}])
+	AM_CONDITIONAL([DAKOTA], [test "x${HAVE_DAKOTA}" == "xyes"])
+
+	dnl Dakota libraries and header files
+	if test "x${HAVE_DAKOTA}" == "xyes"; then
+		DAKOTAINCL=-I${DAKOTA_ROOT}/include
+
+		AC_MSG_CHECKING(for Dakota version)
+		dnl TODO:	Check if this method applies to all other versions of Dakota
+		dnl 		(it should as long as the Dakota binaries have been
+		dnl 		compiled). If so, we can remove the other methods of
+		dnl 		getting the version.
+		dnl
+		DAKOTA_VERSION_OUTPUT=`${DAKOTA_ROOT}/bin/dakota -v`
+		if test -n "${DAKOTA_VERSION_OUTPUT}"; then
+			DAKOTA_VERSION=`echo ${DAKOTA_VERSION_OUTPUT} | grep "Dakota version" | sed 's/Dakota version //' | sed 's/ .*//'`
+		elif test -f "${DAKOTA_ROOT}/VERSION"; then
+			DAKOTA_VERSION=`cat ${DAKOTA_ROOT}/VERSION | grep 'DAKOTA Version' | sed 's/.*DAKOTA Version //' | sed 's/ .*//'`
+		elif test -f "${DAKOTA_ROOT}/../src/src/CommandLineHandler.C"; then
+			DAKOTA_VERSION=`cat ${DAKOTA_ROOT}/../src/src/CommandLineHandler.C | grep 'DAKOTA version' | grep 'release' | grep -v // | sed 's/.*DAKOTA version //' | sed 's/ .*//' `
+		elif test -f "${DAKOTA_ROOT}/../src/src/CommandLineHandler.cpp"; then
+			DAKOTA_VERSION=`cat ${DAKOTA_ROOT}/../src/src/CommandLineHandler.cpp | grep 'DAKOTA version' | grep 'release' | grep -v // | sed 's/.*DAKOTA version //' | sed 's/ .*//' `
+		else
+			AC_MSG_ERROR([Dakota CommandLineHandler.C or CommandLineHandler.cpp file not found to determine DAKOTA_VERSION!]);
+		fi
+		AC_MSG_RESULT([${DAKOTA_VERSION}])
+		AC_DEFINE_UNQUOTED(_DAKOTA_VERSION_, "${DAKOTA_VERSION}", [Dakota version number])
+
+		DAKOTAFLAGS=""
+
+		dnl NOTE:
+		dnl - See $ISSM_DIR/dakota/build/src/Makefile.export.Dakota for the
+		dnl	  flags needed by your combination of Boost and Dakota versions
+		dnl - We know $DAKOTA_ROOT cannot be empty at this point, so no need to
+		dnl   check again in the following conditionals
+		dnl
+		dnl TODO:
+		dnl - Should we also be checking if HAVE_BOOST before adding boost libs?
+		dnl
+		case "${host_os}" in
+			*cygwin*)
+				if test "${DAKOTA_VERSION}" == "5.1" || test "${DAKOTA_VERSION}" == "5.2"; then
+					DAKOTALIB="-L${DAKOTA_ROOT}/lib -L${BOOST_ROOT}/lib -ldakota -lteuchos -lpecos -llhs -lsparsegrid -lsurfpack -lconmin -lddace -lfsudace -ljega -lcport -loptpp -lpsuade -lncsuopt -lcolin -linterfaces -lmomh -lscolib -lpebbl -ltinyxml -lutilib -l3po -lhopspack -lnidr -lamplsolver -lboost_signals -lboost_regex -lboost_filesystem"
+				elif test "${DAKOTA_VERSION}" == "6.1" || test "${DAKOTA_VERSION}" == "6.2"; then
 					DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_COLINY -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_JEGA -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
-					DAKOTALIB="-L$DAKOTA_ROOT/lib -L$BOOST_ROOT/lib -ldakota_src -lpecos_src -lscolib -ljega_fe -llhs -lpebbl -lcolin -linterfaces -lmods -lmoga -loptpp -lsampling -lsoga -lsurfpack -lutilib -lconmin -ldakota_src_fortran -lmod -lncsuopt -lsurfpack_fortran -lteuchos -l3po -lamplsolver -lanalyzer -lbose -lcport -ldace -ldfftpack -leutils -lfsudace -lhopspack -ljega -lnidr -lpecos -lpsuade -lrandom -ltinyxml -lutilities -lsparsegrid -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
-					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H],[1],[disabling DAKOTA_CONFIG_H])
-					AC_DEFINE([DAKOTA_HAVE_MPI],[1],[enabling parallel MPI])
-				else if test x$DAKOTA_VERSION = x6.1 || test x$DAKOTA_VERSION = x6.2; then
-				   DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
-					if test "x$enable_standalone_executables" = "xyes"; then
-						DAKOTALIB="-L$DAKOTA_ROOT/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -llhs -llhs_mods -loptpp -lsurfpack -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -lamplsolver -lcport -ldfftpack -lfsudace -lhopspack -lnidr -lpecos -lpsuade -lsparsegrid $BOOST_ROOT/lib/libboost_serialization.a $BOOST_ROOT/lib/libboost_signals.a $BOOST_ROOT/lib/libboost_regex.a $BOOST_ROOT/lib/libboost_filesystem.a $BOOST_ROOT/lib/libboost_system.a"
-					else
-						DAKOTALIB="-L$DAKOTA_ROOT/lib -L$BOOST_ROOT/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -llhs -llhs_mods -loptpp -lsurfpack -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -lamplsolver -lcport -ldfftpack -lfsudace -lhopspack -lnidr -lpecos -lpsuade -lsparsegrid -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
-					fi
-					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H],[1],[disabling DAKOTA_CONFIG_H])
-					AC_DEFINE([DAKOTA_HAVE_MPI],[1],[enabling parallel MPI])
+					DAKOTALIB="-L${DAKOTA_ROOT}/lib -L${BOOST_ROOT}/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -lscolib -ljega_fe -llhs -lpebbl -lcolin -linterfaces -llhs_mods -lmoga -loptpp -lsoga -lsurfpack -lutilib -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -l3po -lamplsolver -lcport -ldfftpack -leutils -lfsudace -lhopspack -ljega -lnidr -lpecos -lpsuade -ltinyxml -lutilities -lsparsegrid -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
+					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H], [1], [disabling DAKOTA_CONFIG_H])
+					AC_DEFINE([DAKOTA_HAVE_MPI], [1], [enabling parallel MPI])
 				else
-					AC_MSG_ERROR([Dakota version not found or version ($DAKOTA_VERSION) not supported!]);
-				fi
-				fi
+					AC_MSG_ERROR([Dakota version not found or version (${DAKOTA_VERSION}) not supported!]);
 				fi
 			;;
 			*darwin*)
-				if test x$DAKOTA_VERSION = x5.1 || test x$DAKOTA_VERSION = x5.2; then
-					DAKOTALIB="-L$DAKOTA_ROOT/lib -ldakota -lteuchos -lpecos -llhs -lsparsegrid -lsurfpack -lconmin -lddace -lfsudace -ljega -lcport -loptpp -lpsuade -lncsuopt -lcolin -linterfaces -lmomh -lscolib -lpebbl -ltinyxml -lutilib -l3po -lhopspack -lnidr -lamplsolver -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
-					dnl DAKOTALIB+= "-lgslcblas -L/usr/lib -lblas -llapack"
-				else if test x$DAKOTA_VERSION = x5.3 || test x$DAKOTA_VERSION = x5.3.1; then
+				if test "${DAKOTA_VERSION}" == "5.1" || test "${DAKOTA_VERSION}" == "5.2"; then
+					DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota -lteuchos -lpecos -llhs -lsparsegrid -lsurfpack -lconmin -lddace -lfsudace -ljega -lcport -loptpp -lpsuade -lncsuopt -lcolin -linterfaces -lmomh -lscolib -lpebbl -ltinyxml -lutilib -l3po -lhopspack -lnidr -lamplsolver -lboost_signals -lboost_regex -lboost_filesystem -lboost_system -ldl"
+				elif test "${DAKOTA_VERSION}" == "5.3" || test "${DAKOTA_VERSION}" == "5.3.1"; then
 					DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_COLINY -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_JEGA -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
-					DAKOTALIB="-L$DAKOTA_ROOT/lib -L$BOOST_ROOT/lib -ldakota_src -lpecos_src -lscolib -ljega_fe -llhs -lpebbl -lcolin -linterfaces -lmods -lmoga -loptpp -lsampling -lsoga -lsurfpack -lutilib -lconmin -ldakota_src_fortran -lmod -lncsuopt -lsurfpack_fortran -lteuchos -l3po -lamplsolver -lanalyzer -lbose -lcport -ldace -ldfftpack -leutils -lfsudace -lhopspack -ljega -lnidr -lpecos -lpsuade -lrandom -ltinyxml -lutilities -lsparsegrid -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
-					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H],[1],[disabling DAKOTA_CONFIG_H])
-					AC_DEFINE([DAKOTA_HAVE_MPI],[1],[enabling parallel MPI])
-				else if test x$DAKOTA_VERSION = x6.1 || test x$DAKOTA_VERSION = x6.2; then
-					DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
-					if test "x$enable_standalone_executables" = "xyes"; then
-						DAKOTALIB="-L$DAKOTA_ROOT/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -llhs -llhs_mods -loptpp -lsurfpack -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -lamplsolver -lcport -ldfftpack -lfsudace -lhopspack -lnidr -lpecos -lpsuade -lsparsegrid $BOOST_ROOT/lib/libboost_serialization.a $BOOST_ROOT/lib/libboost_signals.a $BOOST_ROOT/lib/libboost_regex.a $BOOST_ROOT/lib/libboost_filesystem.a $BOOST_ROOT/lib/libboost_system.a"
-					else
-						DAKOTALIB="-L$DAKOTA_ROOT/lib -L$BOOST_ROOT/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -llhs -llhs_mods -loptpp -lsurfpack -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -lamplsolver -lcport -ldfftpack -lfsudace -lhopspack -lnidr -lpecos -lpsuade -lsparsegrid -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
+					DAKOTALIB="-L${DAKOTA_ROOT}/lib -L${BOOST_ROOT}/lib -ldakota_src -lpecos_src -lscolib -ljega_fe -llhs -lpebbl -lcolin -linterfaces -lmods -lmoga -loptpp -lsampling -lsoga -lsurfpack -lutilib -lconmin -ldakota_src_fortran -lmod -lncsuopt -lsurfpack_fortran -lteuchos -l3po -lamplsolver -lanalyzer -lbose -lcport -ldace -ldfftpack -leutils -lfsudace -lhopspack -ljega -lnidr -lpecos -lpsuade -lrandom -ltinyxml -lutilities -lsparsegrid -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
+					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H], [1], [disabling DAKOTA_CONFIG_H])
+					AC_DEFINE([DAKOTA_HAVE_MPI], [1], [enabling Dakota with MPI])
+				elif test "${DAKOTA_VERSION}" == "6.1" || test "${DAKOTA_VERSION}" == "6.2"; then
+					if test "${BOOST_VERSION_MAJOR}" == "1"; then
+						if test "${BOOST_VERSION_MINOR}" == "55"; then
+							DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+							DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -llhs -llhs_mods -loptpp -lsurfpack -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -lamplsolver -lcport -ldfftpack -lfsudace -lhopspack -lnidr -lpecos -lpsuade -lsparsegrid -L${BOOST_ROOT}/lib -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system ${BLASLAPACKLIB}"
+						elif test "${BOOST_VERSION_MINOR}" == "72"; then
+							DAKOTAFLAGS="-DHAVE_CONFIG_H -DHAVE_CONFIG_H -DDISABLE_DAKOTA_CONFIG_H -DBOOST_DISABLE_ASSERTS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DHAVE_ADAPTIVE_SAMPLING -DHAVE_ESM -DHAVE_CONMIN -DHAVE_DDACE -DHAVE_DREAM -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_NOMAD -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+							dnl See $ISSM_DIR/dakota/build/src/Makefile.export.Dakota -> Dakota_LIBRARIES
+							DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota_src -ldakota_src_fortran -lnidr -lteuchos -lpecos -lpecos_src -llhs -llhs_mods -llhs_mod -ldfftpack -lsparsegrid -lsurfpack -lsurfpack -lsurfpack_fortran -lconmin -lddace -ldream -lfsudace -lhopspack -lncsuopt -lcport -lnomad -loptpp -lpsuade -lamplsolver -L${BOOST_ROOT}/lib -lboost_filesystem -lboost_program_options -lboost_regex -lboost_serialization -lboost_system ${BLASLAPACKLIB}"
+						fi
 					fi
-					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H],[1],[disabling DAKOTA_CONFIG_H])
-					AC_DEFINE([DAKOTA_HAVE_MPI],[1],[enabling parallel MPI])
+					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H], [1], [disabling DAKOTA_CONFIG_H])
+					AC_DEFINE([DAKOTA_HAVE_MPI], [1], [enabling Dakota with MPI])
+				elif test "${DAKOTA_VERSION}" == "6.11"; then
+					if test "${BOOST_VERSION_MAJOR}" == "1"; then
+						if test "${BOOST_VERSION_MINOR}" == "55"; then
+							DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+							DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -llhs -llhs_mods -loptpp -lsurfpack -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -lamplsolver -lcport -ldfftpack -lfsudace -lhopspack -lnidr -lpecos -lpsuade -lsparsegrid -L$BOOST_ROOT/lib -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system ${BLASLAPACKLIB}"
+						elif test "${BOOST_VERSION_MINOR}" == "72"; then
+							DAKOTAFLAGS="-DHAVE_CONFIG_H -DHAVE_CONFIG_H -DDISABLE_DAKOTA_CONFIG_H -DBOOST_DISABLE_ASSERTS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DHAVE_ADAPTIVE_SAMPLING -DHAVE_ESM -DHAVE_CONMIN -DHAVE_DDACE -DHAVE_DREAM -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_NOMAD -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+							DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota_src -ldakota_src_fortran -lnidr -lteuchosremainder -lteuchosnumerics -lteuchoscomm -lteuchosparameterlist -lteuchosparser -lteuchoscore -lpecos_util -lpecos_src -llhs -llhs_mods -llhs_mod -ldfftpack -lsparsegrid -lsurfpack -lsurfpack -lsurfpack_fortran -lapproxnn -lconmin -lddace -ldream -lfsudace -lhopspack -lncsuopt -lcport -lnomad -loptpp -lpsuade -lamplsolver -L${BOOST_ROOT}/lib -lboost_filesystem -lboost_program_options -lboost_regex -lboost_serialization -lboost_system ${BLASLAPACKLIB}"
+						fi
+					fi
+					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H], [1], [disabling DAKOTA_CONFIG_H])
+					AC_DEFINE([DAKOTA_HAVE_MPI], [1], [enabling Dakota with MPI])
 				else
-					AC_MSG_ERROR([Dakota version not found or version ($DAKOTA_VERSION) not supported!]);
+					AC_MSG_ERROR([Dakota version not found or version (${DAKOTA_VERSION}) not supported!]);
 				fi
-				fi
+			;;
+			*linux*)
+				if test "${DAKOTA_VERSION}" == "5.1" || test "${DAKOTA_VERSION}" == "5.2"; then
+					DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota -lteuchos -lpecos -llhs -lsparsegrid -lsurfpack -lconmin -lddace -lfsudace -ljega -lcport -loptpp -lpsuade -lncsuopt -lcolin -linterfaces -lmomh -lscolib -lpebbl -ltinyxml -lutilib -l3po -lhopspack -lnidr -lamplsolver -lboost_signals -lboost_regex -lboost_filesystem -lboost_system -ldl"
+				elif test "${DAKOTA_VERSION}" == "5.3" || test "${DAKOTA_VERSION}" == "5.3.1"; then
+					DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_COLINY -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_JEGA -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+					DAKOTALIB="-L${DAKOTA_ROOT}/lib -L${BOOST_ROOT}/lib -ldakota_src -lpecos_src -lscolib -ljega_fe -llhs -lpebbl -lcolin -linterfaces -lmods -lmoga -loptpp -lsampling -lsoga -lsurfpack -lutilib -lconmin -ldakota_src_fortran -lmod -lncsuopt -lsurfpack_fortran -lteuchos -l3po -lamplsolver -lanalyzer -lbose -lcport -ldace -ldfftpack -leutils -lfsudace -lhopspack -ljega -lnidr -lpecos -lpsuade -lrandom -ltinyxml -lutilities -lsparsegrid -lboost_signals -lboost_regex -lboost_filesystem -lboost_system"
+					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H], [1], [disabling DAKOTA_CONFIG_H])
+					AC_DEFINE([DAKOTA_HAVE_MPI], [1], [enabling Dakota with MPI])
+				elif test "${DAKOTA_VERSION}" == "6.1" || test "${DAKOTA_VERSION}" == "6.2"; then
+					if test "${BOOST_VERSION_MAJOR}" == "1"; then
+						if test "${BOOST_VERSION_MINOR}" == "55"; then
+							DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+							DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -llhs -llhs_mods -loptpp -lsurfpack -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -lamplsolver -lcport -ldfftpack -lfsudace -lhopspack -lnidr -lpecos -lpsuade -lsparsegrid -L${BOOST_ROOT}/lib -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system ${BLASLAPACKLIB}"
+						elif test "${BOOST_VERSION_MINOR}" == "72"; then
+							DAKOTAFLAGS="-DHAVE_CONFIG_H -DDISABLE_DAKOTA_CONFIG_H -DBOOST_DISABLE_ASSERTS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DHAVE_ADAPTIVE_SAMPLING -DHAVE_ESM -DHAVE_CONMIN -DHAVE_DDACE -DHAVE_DREAM -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_NOMAD -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+							dnl See $ISSM_DIR/dakota/build/src/Makefile.export.Dakota -> Dakota_LIBRARIES
+							DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota_src -ldakota_src_fortran -lnidr -lteuchos -lpecos -lpecos_src -llhs -llhs_mods -llhs_mod -ldfftpack -lsparsegrid -lsurfpack -lsurfpack -lsurfpack_fortran -lconmin -lddace -ldream -lfsudace -lhopspack -lncsuopt -lcport -lnomad -loptpp -lpsuade -lamplsolver -L${BOOST_ROOT}/lib -lboost_filesystem -lboost_program_options -lboost_regex -lboost_serialization -lboost_system ${BLASLAPACKLIB}"
+						fi
+					fi
+					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H], [1], [disabling DAKOTA_CONFIG_H])
+					AC_DEFINE([DAKOTA_HAVE_MPI], [1], [enabling Dakota with MPI])
+				elif test "${DAKOTA_VERSION}" == "6.11"; then
+					if test "${BOOST_VERSION_MAJOR}" == "1"; then
+						if test "${BOOST_VERSION_MINOR}" == "55"; then
+							DAKOTAFLAGS="-DDISABLE_DAKOTA_CONFIG_H -DBOOST_MULTI_INDEX_DISABLE_SERIALIZATION -DDAKOTA_PLUGIN -DBOOST_DISABLE_ASSERTS -DDAKOTA_HAVE_BOOST_FS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DDAKOTA_UTILIB -DHAVE_ADAPTIVE_SAMPLING -DHAVE_CONMIN -DDAKOTA_DDACE -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+							DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota_src -ldream -lfsudace -lddace -lnomad -lpecos_src -llhs -llhs_mods -loptpp -lsurfpack -lconmin -ldakota_src_fortran -llhs_mod -lncsuopt -lsurfpack_fortran -lteuchos -lamplsolver -lcport -ldfftpack -lfsudace -lhopspack -lnidr -lpecos -lpsuade -lsparsegrid -L$BOOST_ROOT/lib -lboost_serialization -lboost_signals -lboost_regex -lboost_filesystem -lboost_system ${BLASLAPACKLIB}"
+						elif test "${BOOST_VERSION_MINOR}" == "72"; then
+							DAKOTAFLAGS="-DHAVE_CONFIG_H -DDISABLE_DAKOTA_CONFIG_H -DBOOST_DISABLE_ASSERTS -DHAVE_UNISTD_H -DHAVE_SYSTEM -DHAVE_WORKING_FORK -DHAVE_WORKING_VFORK -DHAVE_SYS_WAIT_H -DHAVE_USLEEP -DDAKOTA_F90 -DDAKOTA_HAVE_MPI -DHAVE_PECOS -DHAVE_SURFPACK -DHAVE_ADAPTIVE_SAMPLING -DHAVE_ESM -DHAVE_CONMIN -DHAVE_DDACE -DHAVE_DREAM -DHAVE_FSUDACE -DDAKOTA_HOPS -DHAVE_NCSU -DHAVE_NL2SOL -DHAVE_NOMAD -DHAVE_OPTPP -DDAKOTA_OPTPP -DHAVE_PSUADE -DHAVE_AMPL"
+							DAKOTALIB="-L${DAKOTA_ROOT}/lib -ldakota_src -ldakota_src_fortran -lnidr -lteuchosremainder -lteuchosnumerics -lteuchoscomm -lteuchosparameterlist -lteuchosparser -lteuchoscore -lpecos_util -lpecos_src -llhs -llhs_mods -llhs_mod -ldfftpack -lsparsegrid -lsurfpack -lsurfpack -lsurfpack_fortran -lapproxnn -lconmin -lddace -ldream -lfsudace -lhopspack -lncsuopt -lcport -lnomad -loptpp -lpsuade -lamplsolver -L${BOOST_ROOT}/lib -lboost_filesystem -lboost_program_options -lboost_regex -lboost_serialization -lboost_system ${BLASLAPACKLIB}"
+						fi
+					fi
+					AC_DEFINE([DISABLE_DAKOTA_CONFIG_H], [1], [disabling DAKOTA_CONFIG_H])
+					AC_DEFINE([DAKOTA_HAVE_MPI], [1], [enabling Dakota with MPI])
+				else
+					AC_MSG_ERROR([Dakota version not found or version (${DAKOTA_VERSION}) not supported!]);
 				fi
 			;;
 		esac
 
-		case $DAKOTA_VERSION in
-			@<:@1-9@:>@.@<:@0-9@:>@.@<:@0-9@:>@)
-				DAKOTA_MAJOR=`echo $DAKOTA_VERSION | sed -e 's/^\(@<:@0-9@:>@*\)\..*/\1/'`
-				DAKOTA_MINOR=`echo $DAKOTA_VERSION | sed -e 's/^@<:@0-9@:>@*\.\(@<:@0-9@:>@*\)\..*/\1/'`
-				DAKOTA_BUILD=`echo $DAKOTA_VERSION | sed -e 's/^@<:@0-9@:>@*\.@<:@0-9@:>@*\.\(@<:@0-9@:>@*\).*/\1/'`
+		case ${DAKOTA_VERSION} in
+			@<:@1-9@:>@*.@<:@0-9@:>@*.@<:@0-9@:>@*)
+				DAKOTA_MAJOR=`echo ${DAKOTA_VERSION} | sed -e 's/^\(@<:@0-9@:>@*\)\..*/\1/'`
+				DAKOTA_MINOR=`echo ${DAKOTA_VERSION} | sed -e 's/^@<:@0-9@:>@*\.\(@<:@0-9@:>@*\)\..*/\1/'`
+				DAKOTA_BUILD=`echo ${DAKOTA_VERSION} | sed -e 's/^@<:@0-9@:>@*\.@<:@0-9@:>@*\.\(@<:@0-9@:>@*\).*/\1/'`
 			;;
-			@<:@1-9@:>@.@<:@0-9@:>@)
-				DAKOTA_MAJOR=`echo $DAKOTA_VERSION | sed -e 's/^\(@<:@0-9@:>@*\)\..*/\1/'`
-				DAKOTA_MINOR=`echo $DAKOTA_VERSION | sed -e 's/^@<:@0-9@:>@*\.\(@<:@0-9@:>@*\).*/\1/'`
-				DAKOTA_BUILD=0
-			;;
-			@<:@1-9@:>@.@<:@0-9@:>@+)
-				DAKOTA_MAJOR=`echo $DAKOTA_VERSION | sed -e 's/^\(@<:@0-9@:>@*\)\..*/\1/'`
-				DAKOTA_MINOR=`echo $DAKOTA_VERSION | sed -e 's/^@<:@0-9@:>@*\.\(@<:@0-9@:>@*\).*/\1/'`
+			@<:@1-9@:>@*.@<:@0-9@:>@*)
+				DAKOTA_MAJOR=`echo ${DAKOTA_VERSION} | sed -e 's/^\(@<:@0-9@:>@*\)\..*/\1/'`
+				DAKOTA_MINOR=`echo ${DAKOTA_VERSION} | sed -e 's/^@<:@0-9@:>@*\.\(@<:@0-9@:>@*\).*/\1/'`
 				DAKOTA_BUILD=0
 			;;
 			*)
-				AC_MSG_ERROR([Dakota version ($DAKOTA_VERSION) not supported!]);
-		   ;;
+				AC_MSG_ERROR([Dakota version (${DAKOTA_VERSION}) not supported!])
+			;;
 		esac
-		AC_MSG_CHECKING(for dakota major version)
-		AC_MSG_RESULT($DAKOTA_MAJOR)
-		AC_DEFINE_UNQUOTED([_DAKOTA_MAJOR_],$DAKOTA_MAJOR,[Dakota major version number])
-		AC_MSG_CHECKING(for dakota minor version)
-		AC_MSG_RESULT($DAKOTA_MINOR)
-		AC_DEFINE_UNQUOTED([_DAKOTA_MINOR_],$DAKOTA_MINOR,[Dakota minor version number])
+		AC_MSG_CHECKING(for Dakota major version)
+		AC_MSG_RESULT(${DAKOTA_MAJOR})
+		AC_DEFINE_UNQUOTED(_DAKOTA_MAJOR_, $DAKOTA_MAJOR, [Dakota major version number])
+		AC_MSG_CHECKING(for Dakota minor version)
+		AC_MSG_RESULT(${DAKOTA_MINOR})
+		AC_DEFINE_UNQUOTED(_DAKOTA_MINOR_, $DAKOTA_MINOR, [Dakota minor version number])
 		AC_MSG_CHECKING(for dakota build version)
-		AC_MSG_RESULT($DAKOTA_BUILD)
-		AC_DEFINE_UNQUOTED([_DAKOTA_BUILD_],$DAKOTA_BUILD,[Dakota build version number])
-
-		AC_DEFINE([_HAVE_DAKOTA_],[1],[with Dakota in ISSM src])
+		AC_MSG_RESULT(${DAKOTA_BUILD})
+		AC_DEFINE_UNQUOTED(_DAKOTA_BUILD_, $DAKOTA_BUILD, [Dakota build version number])
+
+		AC_DEFINE([_HAVE_DAKOTA_], [1], [with Dakota in ISSM src])
 		AC_SUBST([DAKOTAINCL])
 		AC_SUBST([DAKOTAFLAGS])
 		AC_SUBST([DAKOTALIB])
 	fi
-	AM_CONDITIONAL([ISSM_DAKOTA],[test x$DAKOTA_MAJOR = x6])
-	dnl }}}
-	dnl python{{{
-	AC_ARG_WITH([python-dir],
-	  AS_HELP_STRING([--with-python-dir=DIR], [python root directory.]),
-	  [PYTHON_ROOT=$withval],[PYTHON_ROOT="no"])
-
-	AC_ARG_WITH([python-version],
-	  AS_HELP_STRING([--with-python-version=DIR], [python forced version.]),
-	  [PYTHON_VERSION=$withval],[PYTHON_VERSION="no"])
-
-	dnl Check whether python is enabled
-	AC_MSG_CHECKING([for python])
-	if test "x$PYTHON_ROOT" = "xno" ; then
+	AM_CONDITIONAL([ISSM_DAKOTA], [test "x${DAKOTA_MAJOR}" == "x6"])
+	dnl }}}
+	dnl Python{{{
+	AC_ARG_WITH(
+		[python-dir],
+		AS_HELP_STRING([--with-python-dir=DIR], [Python root directory]),
+		[PYTHON_ROOT=${withval}],
+		[PYTHON_ROOT="no"]
+	)
+
+	AC_ARG_WITH(
+		[python-version],
+		AS_HELP_STRING([--with-python-version=DIR], [Python forced version]),
+		[PYTHON_VERSION=${withval}],
+		[PYTHON_VERSION="no"]
+	)
+	AC_MSG_CHECKING([for Python])
+	if test "x${PYTHON_ROOT}" == "xno"; then
 		HAVE_PYTHON=no
 		HAVE_PYTHON3=no
 	else
 		HAVE_PYTHON=yes
-		if ! test -d "$PYTHON_ROOT"; then
-			AC_MSG_ERROR([python directory provided ($PYTHON_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_PYTHON)
-	AM_CONDITIONAL([PYTHON],[test x$HAVE_PYTHON = xyes])
-
-	dnl python specifics
-	if test "x$HAVE_PYTHON" = "xyes"; then
-		if test "x$PYTHON_VERSION" = "xno" ; then
-			AC_MSG_CHECKING([for python version])
-			dnl Query Python for its version number. Getting [:3] seems to be the
-			dnl best way to do this; it's what "site.py" does in the standard library.
-			PYTHON_VERSION=$($PYTHON_ROOT/bin/python -c "import sys; print sys.version[[:3]]")
-			AC_MSG_RESULT($PYTHON_VERSION)
+		if ! test -d "${PYTHON_ROOT}"; then
+			AC_MSG_ERROR([Python directory provided (${PYTHON_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_PYTHON}])
+	AM_CONDITIONAL([PYTHON], [test "x${HAVE_PYTHON}" == "xyes"])
+
+	dnl Python specifics
+	if test "x${HAVE_PYTHON}" == "xyes"; then
+		if test "x${PYTHON_VERSION}" == "xno"; then
+			AC_MSG_CHECKING([for Python version])
+			dnl Query Python for its version number. Getting [:3] seems to be
+			dnl the best way to do this: it's what "site.py" does in the
+			dnl standard library.
+			PYTHON_VERSION=$(${PYTHON_ROOT}/bin/python -c "import sys; print(sys.version[[:3]])")
+			AC_MSG_RESULT([${PYTHON_VERSION}])
 		else
-			AC_MSG_RESULT([enforced  python version is ]$PYTHON_VERSION)
-		fi
-		dnl recover major
+			AC_MSG_RESULT([enforced Python version is ${PYTHON_VERSION}])
+		fi
+		dnl Determine major version
 		PYTHON_MAJOR=${PYTHON_VERSION%.*}
-		AC_DEFINE_UNQUOTED([_PYTHON_MAJOR_],$PYTHON_MAJOR,[python version major])
-		if test "x$PYTHON_MAJOR" = "x3"; then
+		AC_DEFINE_UNQUOTED(_PYTHON_MAJOR_, $PYTHON_MAJOR, [Python version major])
+		if test "x${PYTHON_MAJOR}" == "x3"; then
 			HAVE_PYTHON3="yes"
 		else
@@ -626,629 +714,656 @@
 		fi
 
-		AC_MSG_CHECKING([for python header file Python.h])
-		dnl Python.h mighty be in different locations:
-		if test -f "$PYTHON_ROOT/include/Python.h"; then
-			PYTHONINCL=-I$PYTHON_ROOT/include
-		else if test -f "$PYTHON_ROOT/include/python$PYTHON_VERSION/Python.h"; then
-			PYTHONINCL=-I$PYTHON_ROOT/include/python$PYTHON_VERSION
-		else if test -f "$PYTHON_ROOT/include/python$PYTHON_VERSIONm/Python.h"; then
-			PYTHONINCL=-I$PYTHON_ROOT/include/python$PYTHON_VERSIONm
+		AC_MSG_CHECKING([for Python header file Python.h])
+		dnl Python.h might be in different locations:
+		if test -f "${PYTHON_ROOT}/include/Python.h"; then
+			PYTHONINCL=-I${PYTHON_ROOT}/include
+		elif test -f "${PYTHON_ROOT}/include/python${PYTHON_VERSION}/Python.h"; then
+			PYTHONINCL=-I${PYTHON_ROOT}/include/python${PYTHON_VERSION}
+		elif test -f "${PYTHON_ROOT}/include/python${PYTHON_VERSION}m/Python.h"; then
+			PYTHONINCL=-I${PYTHON_ROOT}/include/python${PYTHON_VERSION}m
 		else
 			AC_MSG_ERROR([Python.h not found, locate this file and contact ISSM developers]);
 		fi
-		fi
-		fi
-		AC_MSG_RESULT(found)
-		if test "x$PYTHON_MAJOR" = "x3"; then
-				PYTHONLIB="-L$PYTHON_ROOT/lib -lpython$PYTHON_VERSION""m"
+		AC_MSG_RESULT([found])
+		if test "x${PYTHON_MAJOR}" == "x3"; then
+			PYTHONLIB="-L${PYTHON_ROOT}/lib -lpython${PYTHON_VERSION}m"
 		else
-				PYTHONLIB="-L$PYTHON_ROOT/lib -lpython$PYTHON_VERSION"
+			PYTHONLIB="-L${PYTHON_ROOT}/lib -lpython${PYTHON_VERSION}"
 		fi
 		PYTHONEXT=.so
 		case "${host_os}" in
 			*cygwin*)
-			PYTHONLINK="-shared"
+				PYTHONLINK="-shared"
 			;;
 			*linux*)
-			PYTHONLINK="-shared"
+				PYTHONLINK="-shared"
 			;;
 			*darwin*)
-			PYTHONLINK="-dynamiclib"
+				PYTHONLINK="-dynamiclib"
 			;;
 		esac
-		AC_DEFINE([_HAVE_PYTHON_],[1],[with python in ISSM src])
+		AC_DEFINE([_HAVE_PYTHON_], [1], [with Python in ISSM src])
 		AC_SUBST([PYTHONINCL])
 		AC_SUBST([PYTHONLIB])
-		PYTHONWRAPPEREXT=$PYTHONEXT
+		PYTHONWRAPPEREXT=${PYTHONEXT}
 		AC_SUBST([PYTHONWRAPPEREXT])
 		AC_SUBST([PYTHONLINK])
 	fi
-	AM_CONDITIONAL([PYTHON3], [test x$HAVE_PYTHON3 = xyes])
-	dnl }}}
-	dnl python-numpy{{{
-	AC_ARG_WITH([python-numpy-dir],
-	  AS_HELP_STRING([--with-python-numpy-dir=DIR], [python-numpy root directory.]),
-	  [PYTHON_NUMPY_ROOT=$withval],[PYTHON_NUMPY_ROOT="no"])
-
-	dnl you can find numpy by typing
-	dnl >>> import numpy
-	dnl >>> numpy.__file__
-	dnl '/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy/__init__.pyc'
-
-	dnl Check whether numpy is enabled
+	AM_CONDITIONAL([PYTHON3], [test "x${HAVE_PYTHON3}" == "xyes"])
+	dnl }}}
+	dnl NumPy{{{
+	dnl TODO:
+	dnl - Replace references to python-numpy with numpy (and similar terms)
+	dnl	  project-wide
+	dnl
+	AC_ARG_WITH(
+		[python-numpy-dir],
+		AS_HELP_STRING([--with-python-numpy-dir=DIR], [python-numpy root directory]),
+		[PYTHON_NUMPY_ROOT=${withval}],
+		[PYTHON_NUMPY_ROOT="no"]
+	)
+
+	dnl NOTE: You can find NumPy by running,
+	dnl
+	dnl		>>> import numpy
+	dnl		>>> numpy.__file__
+	dnl
+
 	AC_MSG_CHECKING(for python-numpy)
-	if test "x$PYTHON_NUMPY_ROOT" = "xno" ; then
+	if test "x${PYTHON_NUMPY_ROOT}" == "xno"; then
 		HAVE_PYTHON_NUMPY=no
 	else
 		HAVE_PYTHON_NUMPY=yes
-		if ! test -d "$PYTHON_NUMPY_ROOT"; then
-			AC_MSG_ERROR([numpy directory provided ($PYTHON_NUMPY_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_PYTHON_NUMPY)
-
-	dnl numpy lib
-	if test "x$HAVE_PYTHON_NUMPY" = "xyes"; then
-		PYTHON_NUMPYINCL="-I$PYTHON_NUMPY_ROOT -I$PYTHON_NUMPY_ROOT/core/include/numpy"
-		AC_DEFINE([_HAVE_PYTHON_NUMPY_],[1],[with Python-Numpy in ISSM src])
+		if ! test -d "${PYTHON_NUMPY_ROOT}"; then
+			AC_MSG_ERROR([NumPy directory provided (${PYTHON_NUMPY_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_PYTHON_NUMPY}])
+
+	dnl NumPy libraries and header files
+	if test "x${HAVE_PYTHON_NUMPY}" == "xyes"; then
+		PYTHON_NUMPYINCL="-I${PYTHON_NUMPY_ROOT} -I${PYTHON_NUMPY_ROOT}/core/include/numpy"
+		AC_DEFINE([_HAVE_PYTHON_NUMPY_], [1], [with NumPy in ISSM src])
 		AC_SUBST([PYTHON_NUMPYINCL])
 	fi
 	dnl }}}
-	dnl chaco{{{
-	AC_ARG_WITH([chaco-dir],
-	  AS_HELP_STRING([--with-chaco-dir=DIR], [chaco root directory.]),
-	  [CHACO_ROOT=$withval],[CHACO_ROOT="no"])
-
-	dnl Check whether chaco is enabled
-	AC_MSG_CHECKING([for chaco])
-	if test "x$CHACO_ROOT" = "xno" ; then
+	dnl Chaco{{{
+	AC_ARG_WITH(
+		[chaco-dir],
+		AS_HELP_STRING([--with-chaco-dir=DIR], [Chaco root directory]),
+		[CHACO_ROOT=${withval}],
+		[CHACO_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for Chaco])
+	if test "x${CHACO_ROOT}" == "xno"; then
 		HAVE_CHACO=no
 	else
 		HAVE_CHACO=yes
-		if ! test -d "$CHACO_ROOT"; then
-			AC_MSG_ERROR([chaco directory provided ($CHACO_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_CHACO)
-	AM_CONDITIONAL([CHACO],[test x$HAVE_CHACO = xyes])
-
-	dnl library and header files
-	if test "x$HAVE_CHACO" = "xyes"; then
-		CHACOINCL=-I$CHACO_ROOT/include
-		CHACOLIB="-L$CHACO_ROOT/lib -lchacominusblas"
-		AC_DEFINE([_HAVE_CHACO_],[1],[with Chaco in ISSM src])
+		if ! test -d "${CHACO_ROOT}"; then
+			AC_MSG_ERROR([Chaco directory provided (${CHACO_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_CHACO}])
+	AM_CONDITIONAL([CHACO], [test "x${HAVE_CHACO}" == "xyes"])
+
+	dnl Chaco libraries and header files
+	if test "x${HAVE_CHACO}" == "xyes"; then
+		CHACOINCL=-I${CHACO_ROOT}/include
+		CHACOLIB="-L${CHACO_ROOT}/lib -lchacominusblas"
+		AC_DEFINE([_HAVE_CHACO_], [1], [with Chaco in ISSM src])
 		AC_SUBST([CHACOINCL])
 		AC_SUBST([CHACOLIB])
 	fi
 	dnl }}}
-	dnl scotch{{{
-	AC_ARG_WITH([scotch-dir],
-	  AS_HELP_STRING([--with-scotch-dir=DIR], [scotch root directory.]),
-	  [SCOTCH_ROOT=$withval],[SCOTCH_ROOT="no"])
-
-  dnl Check whether scotch is enabled
-	AC_MSG_CHECKING([for scotch])
-	if test "x$SCOTCH_ROOT" = "xno" ; then
+	dnl ESMF{{{
+	AC_ARG_WITH(
+		[esmf-dir],
+		AS_HELP_STRING([--with-esmf-dir=DIR], [ESMF root directory]),
+		[ESMF_ROOT=${withval}],
+		[ESMF_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for ESMF])
+	if test "x${ESMF_ROOT}" == "xno"; then
+		HAVE_ESMF=no
+	else
+		HAVE_ESMF=yes
+		if ! test -d "${ESMF_ROOT}"; then
+			AC_MSG_ERROR([ESMF directory provided (${ESMF_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_ESMF}])
+
+	dnl ESMF libraries and header files
+	if test "x${HAVE_ESMF}" == "xyes"; then
+		ESMFINCL="-I${ESMF_ROOT}/include"
+		ESMFLIB="-L${ESMF_ROOT}/lib -lesmf"
+		AC_DEFINE([_HAVE_ESMF_], [1], [with ESMF in ISSM src])
+		AC_SUBST([ESMFINCL])
+		AC_SUBST([ESMFLIB])
+	fi
+	AM_CONDITIONAL([ESMF], [test "x${HAVE_ESMF}" == "xyes"])
+	dnl }}}
+	dnl CoDiPack{{{
+	AC_ARG_WITH(
+		[codipack-dir],
+		AS_HELP_STRING([--with-codipack-dir=DIR], [CoDiPack root directory]),
+		[CODIPACK_ROOT=${withval}],
+		[CODIPACK_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for CoDiPack])
+	if test "x${CODIPACK_ROOT}" == "xno"; then
+		HAVE_CODIPACK=no
+	else
+		HAVE_CODIPACK=yes
+		if ! test -d "${CODIPACK_ROOT}"; then
+			AC_MSG_ERROR([CoDiPack directory provided (${CODIPACK_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_CODIPACK}])
+
+	dnl CoDiPack libraries and header files
+	if test "x${HAVE_CODIPACK}" == "xyes"; then
+		CODIPACKINCL="-I${CODIPACK_ROOT}/include"
+		AC_DEFINE([_HAVE_CODIPACK_], [1], [with CoDiPack in ISSM src])
+		AC_DEFINE([_HAVE_AD_], [1], [with AD in ISSM src])
+		AC_SUBST([CODIPACKINCL])
+	fi
+	AM_CONDITIONAL([CODIPACK], [test "x${HAVE_CODIPACK}" == "xyes"])
+	AM_COND_IF(CODIPACK, [CXXFLAGS+=" -std=c++11"])
+	dnl }}}
+	dnl Tape Allocation {{{
+	AC_ARG_ENABLE(
+		[tape-alloc],																dnl feature
+		AS_HELP_STRING([--enable-tape-alloc], [turn tape allocation support on]),
+		[enable_tape_alloc=${enableval}],
+		[enable_tape_alloc=no]
+	)
+	AC_MSG_CHECKING(for tape allocation)
+	if test "x${enable_tape_alloc}" == "xyes"; then
+		AC_DEFINE([_AD_TAPE_ALLOC_], [1], [enable a priori tape allocation for AD])
+	fi
+	AC_MSG_RESULT([${enable_tape_alloc}])
+	dnl }}}
+	dnl ADOL-C {{{
+	AC_ARG_WITH(
+		[adolc-dir],
+		AS_HELP_STRING([--with-adolc-dir=DIR], [ADOL-C root directory]),
+		[ADOLC_ROOT=${withval}],
+		[ADOLC_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for ADOL-C])
+	if test "x${ADOLC_ROOT}" == "xno"; then
+		HAVE_ADOLC=no
+	else
+		HAVE_ADOLC=yes
+		if ! test -d "${ADOLC_ROOT}"; then
+			AC_MSG_ERROR([ADOL-C directory provided (${ADOLC_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_ADOLC}])
+
+	dnl ADOL-C libraries and header files
+	if test "x${HAVE_ADOLC}" == "xyes"; then
+		ADOLCINCL="-I${ADOLC_ROOT}/include"
+		dnl ADOLCLIB="-L${ADOLC_ROOT}/lib64 -ladolc" used to be the path
+		ADOLCLIB="-L${ADOLC_ROOT}/lib -ladolc"
+		AC_DEFINE([_HAVE_ADOLC_], [1], [with ADOL-C in ISSM src])
+		AC_DEFINE([_HAVE_AD_], [1], [with AD in ISSM src])
+		AC_SUBST([ADOLCINCL])
+		AC_SUBST([ADOLCLIB])
+	fi
+	AM_CONDITIONAL([ADOLC], [test "x${HAVE_ADOLC}" == "xyes"])
+	AM_COND_IF(ADOLC, [CXXFLAGS+=" -std=c++11"])
+	dnl }}}
+	dnl ADOL-C version{{{
+	AC_ARG_WITH(
+		[adolc-version],
+		AS_HELP_STRING([--with-adolc-version=number], [ADOL-C version]),
+		[ADOLC_VERSION=${withval}],
+		[ADOLC_VERSION=2]
+	)
+	AC_MSG_CHECKING(for ADOL-C version)
+
+	AC_DEFINE_UNQUOTED(_ADOLC_VERSION_, $ADOLC_VERSION, [ADOL-C version])
+	AC_MSG_RESULT(${ADOLC_VERSION})
+	dnl }}}
+	dnl ADIC2 {{{
+	AC_ARG_WITH(
+		[adic2-dir],
+		AS_HELP_STRING([--with-adic2-dir=DIR], [ADIC2 root directory]),
+		[ADIC2_ROOT=${withval}],
+		[ADIC2_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for ADIC2])
+	if test "x${ADIC2_ROOT}" == "xno"; then
+		HAVE_ADIC2=no
+	else
+		HAVE_ADIC2=yes
+		if ! test -d "${ADIC2_ROOT}"; then
+			AC_MSG_ERROR([ADIC2 directory provided (${ADIC2_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_ADIC2}])
+
+	dnl ADIC2 libraries and header files
+	if test "x${HAVE_ADIC2}" == "xyes"; then
+		ADIC2INCL="-DADIC2_DENSE -I${ADIC2_ROOT}/include -I${ADIC2_ROOT}/share/runtime_dense"
+		ADIC2LIB=""
+		AC_DEFINE([_HAVE_ADIC2_], [1], [with ADIC2 in ISSM src])
+		AC_SUBST([ADIC2INCL])
+		AC_SUBST([ADIC2LIB])
+	fi
+	AM_CONDITIONAL([ADIC2], [test "x${HAVE_ADIC2}" == "xyes"])
+	dnl }}}
+	dnl ATLAS {{{
+	AC_ARG_WITH(
+		[atlas-dir],
+		AS_HELP_STRING([--with-atlas-dir=DIR], [ATLAS root directory]),
+		[ATLAS_ROOT=${withval}],
+		[ATLAS_ROOT="no"]
+	)
+	AC_MSG_CHECKING(for ATLAS and CBLAS libraries)
+	if test "x${ATLAS_ROOT}" == "xno"; then
+		HAVE_ATLAS=no
+	else
+		HAVE_ATLAS=yes
+		if ! test -d "${ATLAS_ROOT}"; then
+			AC_MSG_ERROR([ATLAS directory provided (${ATLAS_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_ATLAS}])
+
+	dnl ATLAS libraries and header files
+	if test "x${HAVE_ATLAS}" == "xyes"; then
+		case "${host_os}" in
+			*cygwin*)
+				ATLASLIB="-L`cygpath -m ${ATLAS_ROOT}` -Wl,libatlas.lib  -Wl,libcblas.lib"
+			;;
+			*linux*)
+				ATLASLIB="-L${ATLAS_ROOT}/lib -lcblas -latlas -lm "
+			;;
+			*darwin*)
+				ATLASLIB="-L${ATLAS_ROOT}/lib -lcblas -latlas -lm "
+			;;
+		esac
+		AC_DEFINE([_HAVE_ATLAS_], [1], [with ATLAS in ISSM src])
+		AC_SUBST([ATLASLIB])
+	fi
+	dnl }}}
+	dnl GSL{{{
+	AC_ARG_WITH(
+		[gsl-dir],
+		AS_HELP_STRING([--with-gsl-dir=DIR], [GSL root directory]),
+		[GSL_ROOT=${withval}],
+		[GSL_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for GSL])
+	if test "x${GSL_ROOT}" == "xno"; then
+		HAVE_GSL=no
+	else
+		HAVE_GSL=yes
+		if ! test -d "${GSL_ROOT}"; then
+			AC_MSG_ERROR([GSL directory provided (${GSL_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_GSL}])
+
+	dnl GSL libraries and header files
+	if test "x${HAVE_GSL}" == "xyes"; then
+		GSLINCL="-I${GSL_ROOT}/include"
+		if test "x${HAVE_ATLAS}" == "xyes"; then
+			GSLLIB="-dy -L${GSL_ROOT}/lib -lgsl -L${ATLAS_ROOT}/lib -lcblas -latlas -lm"
+		else
+			GSLLIB="-L${GSL_ROOT}/lib -lgsl -lgslcblas -lm"
+		fi
+		AC_DEFINE([_HAVE_GSL_], [1], [with GSL in ISSM src])
+		AC_SUBST([GSLINCL])
+		AC_SUBST([GSLLIB])
+	fi
+	AM_CONDITIONAL([GSL], [test "x${HAVE_GSL}" == "xyes"])
+	dnl }}}
+	dnl AMPI (ADOL-C){{{
+	AC_ARG_WITH(
+		[ampi-dir],
+		AS_HELP_STRING([--with-ampi-dir=DIR], [Adjoinable MPI root directory]),
+		[AMPI_ROOT=${withval}],
+		[AMPI_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for AMPI])
+	if test "x${AMPI_ROOT}" == "xno"; then
+		HAVE_AMPI=no
+	else
+		HAVE_AMPI=yes
+		if ! test -d "${AMPI_ROOT}"; then
+			AC_MSG_ERROR([AMPI directory provided (${AMPI_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_AMPI}])
+
+	dnl AMPI libraries and header files
+	if test "x${HAVE_AMPI}" == "xyes"; then
+		AMPIINCL="-I${AMPI_ROOT}/include"
+		if test "x${ADOLC_ROOT}" == "xno"; then
+			AC_MSG_ERROR([cannot run AMPI without ADOL-C]);
+		fi
+		dnl AMPILIB="-dy -L${AMPI_ROOT}/lib -lampiCommon -L${ADOLC_ROOT}/lib -ladolc -L${AMPI_ROOT}/lib -lampiCommon -lampiBookkeeping -lampiTape"
+		dnl AMPILIB="-dy -L${AMPI_ROOT}/lib  -L${ADOLC_ROOT}/lib -Wl,--start-group,-lampiCommon,-ladolc,-lampiCommon,-lampiBookkeeping,-lampiTape,-lampiPlainC,-lampiADtoolStubsST,--end-group"
+		dnl AMPILIB="-L${AMPI_ROOT}/lib  -L${ADOLC_ROOT}/lib -Wl,--start-group -lampiCommon -ladolc -lampiCommon -lampiBookkeeping -lampiTape -lampiPlainC -lampiADtoolStubsST -Wl,--end-group"
+		dnl AMPILIB="${AMPI_ROOT}/lib/libampiCommon.so ${ADOLC_ROOT}/lib/libadolc.so  ${AMPI_ROOT}/lib/libampiCommon.so ${AMPI_ROOT}/lib/libampiBookkeeping.so ${AMPI_ROOT}/lib/libampiTape.so ${AMPI_ROOT}/lib/libampiPlainC.so  ${AMPI_ROOT}/lib/libampiADtoolStubsST.so"
+		dnl AMPILIB="-dy -L${AMPI_ROOT}/lib  -L${ADOLC_ROOT}/lib -lampiCommon -ladolc -lampiCommon -lampiBookkeeping -lampiTape -lampiPlainC -lampiADtoolStubsST"
+		AMPILIB="-dy -L${AMPI_ROOT}/lib  -lampiCommon -lampiBookkeeping -lampiTape"
+		AC_DEFINE([_HAVE_AMPI_], [1], [with AMPI in ISSM src])
+		AC_SUBST([AMPIINCL])
+		AC_SUBST([AMPILIB])
+	fi
+	AM_CONDITIONAL([AMPI], [test "x${HAVE_AMPI}" == "xyes"])
+	dnl }}}
+	dnl Adjoint MPI (CoDiPack){{{
+	AC_ARG_WITH(
+		[adjointmpi-dir],
+		AS_HELP_STRING([--with-adjointmpi-dir=DIR], [Adjoint MPI root directory]),
+		[ADJOINTMPI_ROOT=${withval}],
+		[ADJOINTMPI_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for Adjoint MPI])
+	if test "x${ADJOINTMPI_ROOT}" == "xno"; then
+		HAVE_ADJOINTMPI=no
+	else
+		HAVE_ADJOINTMPI=yes
+		if ! test -d "${ADJOINTMPI_ROOT}"; then
+			AC_MSG_ERROR([Adjoint MPI directory provided (${ADJOINTMPI_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_ADJOINTMPI}])
+
+	dnl Adjoint MPI libraries and header files
+	if test "x${HAVE_ADJOINTMPI}" == "xyes"; then
+		if test "x${CODIPACK_ROOT}" == "xno"; then
+			AC_MSG_ERROR([cannot run Adjoint MPI without CoDiPack]);
+		fi
+		ADJOINTMPIINCL="-I${ADJOINTMPI_ROOT}/include"
+		ADJOINTMPILIB="-L${ADJOINTMPI_ROOT}/lib  -lAMPI"
+		dnl Also set _HAVE_AMPI_, because the interface is (almost) the same as
+		dnl for AMPI
+		AC_DEFINE([_HAVE_AMPI_], [1], [with AMPI in ISSM src])
+		AC_DEFINE([_HAVE_ADJOINTMPI_], [1], [with Adjoint MPI in ISSM src])
+		AC_SUBST([ADJOINTMPIINCL])
+		AC_SUBST([ADJOINTMPILIB])
+	fi
+	AM_CONDITIONAL([ADJOINTMPI], [test "x${HAVE_ADJOINTMPI}" == "xyes"])
+	dnl }}}
+	dnl MeDiPack (CoDiPack, ADOL-C dev){{{
+	AC_ARG_WITH(
+		[medipack-dir],
+		AS_HELP_STRING([--with-medipack-dir=DIR], [MeDiPack root directory]),
+		[MEDIPACK_ROOT=${withval}],
+		[MEDIPACK_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for MeDiPack])
+	if test "x${MEDIPACK_ROOT}" == "xno"; then
+		HAVE_MEDIPACK=no
+	else
+		HAVE_MEDIPACK=yes
+		if ! test -d "${MEDIPACK_ROOT}"; then
+			AC_MSG_ERROR([MeDiPack directory provided (${MEDIPACK_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_MEDIPACK}])
+
+	dnl MeDiPack libraries and header files
+	if test "x${HAVE_MEDIPACK}" == "xyes"; then
+		if test "x${CODIPACK_ROOT}" == "xno"; then
+			AC_MSG_ERROR([cannot run MeDiPack without CoDiPack]);
+		fi
+		MEDIPACKINCL="-I${MEDIPACK_ROOT}/include -I${MEDIPACK_ROOT}/src"
+		dnl Also set _HAVE_AMPI_, because the interface is (almost) the same as
+		dnl for AMPI
+		AC_DEFINE([_HAVE_AMPI_], [1], [with AMPI in ISSM src])
+		AC_DEFINE([_HAVE_MEDIPACK_], [1], [with MeDiPack in ISSM src])
+		AC_SUBST([MEDIPACKINCL])
+	fi
+	AM_CONDITIONAL([MEDIPACK], [test "x${HAVE_MEDIPACK}" == "xyes"])
+	dnl }}}
+	dnl PETSc{{{
+	AC_ARG_WITH(
+		[petsc-dir],
+		AS_HELP_STRING([--with-petsc-dir=DIR], [PETSc root directory, necessary for parallel build]),
+		[PETSC_ROOT=${withval}],
+		[PETSC_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for PETSc])
+	if test "x${PETSC_ROOT}" == "xno"; then
+		HAVE_PETSC=no
+	else
+		HAVE_PETSC=yes
+		if ! test -d "${PETSC_ROOT}"; then
+			AC_MSG_ERROR([PETSc directory provided (${PETSC_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_PETSC}])
+	AM_CONDITIONAL([PETSC], [test "x${HAVE_PETSC}" == "xyes"])
+
+	dnl PETSc libraries and header files
+	if test "x${HAVE_PETSC}" == "xyes"; then
+		AC_MSG_CHECKING(for PETSc version)
+		if ! test -f "${PETSC_ROOT}/include/petscversion.h"; then
+			AC_MSG_ERROR([PETSc not installed correctly: file (${PETSC_ROOT}/include/petscversion.h) does not exist!]);
+		fi
+		PETSC_MAJOR=`cat ${PETSC_ROOT}/include/petscversion.h | grep "#define PETSC_VERSION_MAJOR" | sed 's/#define PETSC_VERSION_MAJOR//' | sed 's/ //g'`
+		PETSC_MINOR=`cat ${PETSC_ROOT}/include/petscversion.h | grep "#define PETSC_VERSION_MINOR" | sed 's/#define PETSC_VERSION_MINOR//' | sed 's/ //g'`
+		AC_DEFINE_UNQUOTED(_PETSC_MAJOR_, $PETSC_MAJOR, [PETSc version major])
+		AC_DEFINE_UNQUOTED(_PETSC_MINOR_, $PETSC_MINOR, [PETSc version minor])
+		AC_MSG_RESULT([${PETSC_MAJOR}.${PETSC_MINOR}])
+
+		dnl PETSC_VERSION_DATE_HG=`cat ${PETSC_ROOT}/include/petscversion.h | grep "#define PETSC_VERSION_DATE_HG" | sed 's/#define PETSC_VERSION_DATE_HG//' | sed 's/ //g' | sed -e 's/\"//g' `
+		PETSC_RELEASE=`cat ${PETSC_ROOT}/include/petscversion.h | grep "#define PETSC_VERSION_RELEASE" | sed 's/#define PETSC_VERSION_RELEASE//' | sed 's/ //g'`
+
+		AC_MSG_CHECKING(whether PETSc is the development version)
+		dnl if test "x${PETSC_VERSION_DATE_HG}" == "xunknown"; then
+		if test "${PETSC_RELEASE}" == "0"; then
+			AC_DEFINE([_HAVE_PETSCDEV_], [1], [with PETSc-dev])
+			AC_MSG_RESULT([yes])
+		else
+			AC_MSG_RESULT([no])
+		fi
+
+		AC_ARG_WITH(
+			[petsc-arch],
+			AS_HELP_STRING([--with-petsc-arch=DIR], [PETSc arch, necessary for PETSc < 3.0]),
+			[PETSC_ARCH=${withval}],
+			[PETSC_ARCH=""]
+		)
+
+		AC_MSG_CHECKING(for PETSc libraries and header files in ${PETSC_ROOT})
+		dnl To get PETSc's libraries,
+		dnl
+		dnl		cd $ISSM_DIR/externalpackages/petsc/src
+		dnl		make getlinklibs
+		dnl
+		PETSCINCL=" -I${PETSC_ROOT}/include"
+		dnl Add other location (not needed anymore since at least PETSc 3.0)
+		if test -n "${PETSC_ARCH}" && test -d "${PETSC_ROOT}/${PETSC_ARCH}/include"; then
+			PETSCINCL+=" ${PETSC_ROOT}/${PETSC_ARCH}/include"
+		fi
+		if test -n "${PETSC_ARCH}" && test -d "${PETSC_ROOT}/include/${PETSC_ARCH}"; then
+			PETSCINCL+=" ${PETSC_ROOT}/include/${PETSC_ARCH}"
+		fi
+
+		case "${host_os}" in
+			*cygwin*)
+				if test ${PETSC_MAJOR} -lt 3; then
+					PETSCLIB=-Wl,/LIBPATH:`cygpath -w ${PETSC_ROOT}/lib` -Wl,libpetscksp.lib  -Wl,libpetscdm.lib -Wl,libpetscmat.lib -Wl,libpetscvec.lib -Wl,libpetscsnes.lib  -Wl,libpetscts.lib -Wl,libmpiuni.lib -Wl,libpetsc.lib
+				else
+					PETSCLIB="/link -Wl,/LIBPATH:`cygpath -m ${PETSC_ROOT}/lib` -Wl,libpetsc.lib"
+					PETSCINCL="/I`cygpath -m ${PETSC_ROOT}/include`"
+				fi
+			;;
+			*linux*)
+				if test ${PETSC_MAJOR} -lt 3; then
+					PETSCLIB="-L${PETSC_ROOT}/lib -lpetscksp -lpetscdm -lpetscmat -lpetscvec -lpetscsnes -lpetscts -lmpiuni -lpetsc"
+				else
+					PETSCLIB="-L${PETSC_ROOT}/lib -lpetsc -ldl"
+					if test ${PETSC_MAJOR} -gt 3 || test ${PETSC_MINOR} -ge 3; then
+						PETSCLIB+=" -lparmetis -lmetis"
+					fi
+				fi
+				if test "x$host_os_version" = "x3.0.101-0.31.1_1.0502.8394-cray_gem_s"; then
+					PETSCLIB="-L${PETSC_ROOT}/lib -lcraypetsc_gnu_real -lmetis"
+				fi
+			;;
+			*darwin*)
+				if test ${PETSC_MAJOR} -lt 3; then
+					PETSCLIB="-L${PETSC_ROOT}/lib -lpetscksp -lpetscdm -lpetscmat -lpetscvec -lpetscsnes -lpetscts -lpetsc"
+				else
+					PETSCLIB="-L${PETSC_ROOT}/lib -lpetsc"
+					if test ${PETSC_MAJOR} -gt 3 || test ${PETSC_MINOR} -ge 3; then
+						PETSCLIB+=" -lmetis"
+					fi
+				fi
+			;;
+		esac
+		AC_MSG_RESULT([done])
+		AC_DEFINE([_HAVE_PETSC_], [1], [with PETSc in ISSM src])
+		AC_SUBST([PETSCINCL])
+		AC_SUBST([PETSCLIB])
+	fi
+	dnl }}}
+	dnl MPI{{{
+	AC_MSG_CHECKING(for MPI)
+
+	AC_ARG_WITH(
+		[mpi-include],
+		AS_HELP_STRING([--with-mpi-include=DIR], [MPI include directory, necessary for parallel build]),
+		[MPI_INCLUDE=${withval}],
+		[MPI_INCLUDE=""]
+	)
+
+	AC_ARG_WITH(
+		[mpi-libdir],
+		AS_HELP_STRING([--with-mpi-libdir=DIR], [MPI library directory, necessary for parallel build]),
+		[MPI_LIBDIR=${withval}],
+		[MPI_LIBDIR=""]
+	)
+
+	AC_ARG_WITH(
+		[mpi-libflags],
+		AS_HELP_STRING([--with-mpi-libflags=LIBS], [MPI libraries to be used, necessary for parallel build]),
+		[MPI_LIBFLAGS=${withval}],
+		[MPI_LIBFLAGS=""]
+	)
+
+	if test -z "${MPI_INCLUDE}"; then
+		HAVE_MPI=no
+	else
+		HAVE_MPI=yes
+
+		dnl Processing for Windows
+		dnl
+		dnl NOTE: We know $VENDOR cannot be empty at this point, so no need to
+		dnl		  check again in the following conditionals
+		dnl
+		if test "${VENDOR}" == "intel-win7-32"; then
+			MPI_LIBDIR=`cygpath -m ${MPI_LIBDIR}`
+			MPI_INCLUDE=`cygpath -m ${MPI_INCLUDE}`
+		elif test "${VENDOR}" == "intel-win7-64"; then
+			MPI_LIBDIR="/I`cygpath -m ${MPI_LIBDIR}`"
+			MPI_INCLUDE=`cygpath -m ${MPI_INCLUDE}`
+		elif test "${VENDOR}" == "MSVC-Win64" || test "${VENDOR}" == "MSVC-Win64-par"; then
+			MPI_LIBDIR=`cygpath -m ${MPI_LIBDIR}`
+			MPI_INCLUDE=`cygpath -m ${MPI_INCLUDE}`
+		fi
+
+		if test -z "${MPI_LIBDIR}"; then
+			MPILIB="${MPI_LIBFLAGS}"
+		else
+			MPILIB="-L${MPI_LIBDIR} ${MPI_LIBFLAGS}"
+		fi
+
+		if test "x${IS_WINDOWS}" == "xyes"; then
+			MPIINCL=/I"${MPI_INCLUDE}"
+		else
+			MPIINCL="-I${MPI_INCLUDE}"
+		fi
+
+		AC_DEFINE([_HAVE_MPI_], [1], [with MPI in ISSM src])
+		AC_DEFINE([HAVE_MPI], [1], [MPI flag for Dakota (DO NOT REMOVE)])
+		AC_SUBST([MPIINCL])
+		AC_SUBST([MPILIB])
+	fi
+	AM_CONDITIONAL([MPI], [test "x${HAVE_MPI}" == "xyes"])
+	AC_MSG_RESULT([${HAVE_MPI}])
+	dnl }}}
+	dnl SCOTCH{{{
+	AC_ARG_WITH(
+		[scotch-dir],
+		AS_HELP_STRING([--with-scotch-dir=DIR], [SCOTCH root directory]),
+		[SCOTCH_ROOT=$withval],
+		[SCOTCH_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for SCOTCH])
+	if test "x${SCOTCH_ROOT}" == "xno"; then
 		HAVE_SCOTCH=no
 	else
 		HAVE_SCOTCH=yes
-		if ! test -d "$SCOTCH_ROOT"; then
-			AC_MSG_ERROR([scotch directory provided ($SCOTCH_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_SCOTCH)
-	AM_CONDITIONAL([SCOTCH],[test x$HAVE_SCOTCH = xyes])
-
-	dnl scotch libraries
-	if test "x$HAVE_SCOTCH" = "xyes"; then
-		SCOTCHINCL="-DNOFILEIO -I$SCOTCH_ROOT/include -DSCOTCH_VERSION=\\\"UNKNOWN\\\""
-		SCOTCHLIB="-L$SCOTCH_ROOT/lib -lnfioscotch -lnfioscotcherr -lnfioscotcherrexit "
-		AC_DEFINE([_HAVE_SCOTCH_],[1],[with Scotch in ISSM src])
+		if ! test -d "${SCOTCH_ROOT}"; then
+			AC_MSG_ERROR([SCOTCH directory provided (${SCOTCH_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_SCOTCH}])
+	AM_CONDITIONAL([SCOTCH], [test "x${HAVE_SCOTCH}" == "xyes"])
+
+	dnl SCOTCH libraries and header files
+	if test "x${HAVE_SCOTCH}" == "xyes"; then
+		if test "x${SCOTCH_ROOT}" == "x${PETSC_ROOT}"; then
+			AC_DEFINE([_PETSC_SCOTCH_], [1], [is SCOTCH installed via PETSc])
+			SCOTCHINCL="-DNOFILEIO -I${SCOTCH_ROOT}/include -DSCOTCH_VERSION=\\\"UNKNOWN\\\""
+			SCOTCHLIB="-L${SCOTCH_ROOT}/lib -lnfioscotch -lnfioscotcherr -lnfioscotcherrexit "
+		else
+			SCOTCHINCL="-I${SCOTCH_ROOT}/include"
+			SCOTCHLIB="-L${SCOTCH_ROOT}/lib "
+			if test "x${HAVE_MPI}" == "xyes"; then
+				SCOTCHLIB+="-lptesmumps -lptscotch -lptscotcherr -lptscotcherrexit -lscotch"
+			else
+				SCOTCHLIB+="-lscotch -lscotcherr -lscotcherrexit"
+			fi
+		fi
+		AC_DEFINE([_HAVE_SCOTCH_], [1], [with SCOTCH in ISSM src])
 		AC_SUBST([SCOTCHINCL])
 		AC_SUBST([SCOTCHLIB])
 	fi
 	dnl }}}
-	dnl esmf{{{
-	AC_ARG_WITH([esmf-dir],
-		AS_HELP_STRING([--with-esmf-dir=DIR], [esmf root directory.]),
-		[ESMF_ROOT=$withval],[ESMF_ROOT="no"])
-
-	dnl Check whether esmf is enabled
-	AC_MSG_CHECKING([for esmf])
-	if test "x$ESMF_ROOT" = "xno" ; then
-		HAVE_ESMF=no
-	else
-		HAVE_ESMF=yes
-		if ! test -d "$ESMF_ROOT"; then
-			AC_MSG_ERROR([esmf directory provided ($ESMF_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_ESMF)
-
-	dnl esmf headers and libraries
-	if test "x$HAVE_ESMF" == "xyes"; then
-		ESMFINCL="-I$ESMF_ROOT/include"
-		ESMFLIB="-L$ESMF_ROOT/lib -lesmf"
-		AC_DEFINE([_HAVE_ESMF_],[1],[with esmf in ISSM src])
-		AC_SUBST([ESMFINCL])
-		AC_SUBST([ESMFLIB])
-	fi
-	AM_CONDITIONAL([ESMF], [test x$HAVE_ESMF = xyes])
-	dnl }}}
-	dnl codipack{{{
-	AC_ARG_WITH([codipack-dir],
-		AS_HELP_STRING([--with-codipack-dir=DIR], [CoDiPack root directory.]),
-		[CODIPACK_ROOT=$withval],[CODIPACK_ROOT="no"])
-
-	dnl Check whether codipack is enabled
-	AC_MSG_CHECKING([for CoDiPack])
-	if test "x$CODIPACK_ROOT" = "xno" ; then
-		HAVE_CODIPACK=no
-	else
-		HAVE_CODIPACK=yes
-		if ! test -d "$CODIPACK_ROOT"; then
-			AC_MSG_ERROR([codipack directory provided ($CODIPACK_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_CODIPACK)
-
-	dnl codipack headers and libraries
-	if test "x$HAVE_CODIPACK" == "xyes"; then
-		CODIPACKINCL="-I$CODIPACK_ROOT/include"
-		AC_DEFINE([_HAVE_CODIPACK_],[1],[with codipack in ISSM src])
-		AC_DEFINE([_HAVE_AD_],[1],[with AD in ISSM src])
-		AC_SUBST([CODIPACKINCL])
-	fi
-	AM_CONDITIONAL([CODIPACK], [test x$HAVE_CODIPACK = xyes])
-   AM_COND_IF(CODIPACK,[CXXFLAGS+=" -std=c++11"])
-	dnl }}}
-	dnl tapeallocation {{{
-	AC_ARG_ENABLE([tape-alloc], dnl feature
-		AS_HELP_STRING([--enable-tape-alloc],[turn tape allocation support on]),
-		[enable_tape_alloc=$enableval], [enable_tape_alloc=no])
-
-	dnl check whether enabled
-	AC_MSG_CHECKING(for tape memory allocation)
-	if test "x$enable_tape_alloc" = xyes; then
-		AC_DEFINE([_AD_TAPE_ALLOC_],[1],[Macro to enable a priori tape allocation for AD])
-	fi
-	AC_MSG_RESULT($enable_tape_alloc)
-	dnl }}}
-	dnl adolc{{{
-	AC_ARG_WITH([adolc-dir],
-		AS_HELP_STRING([--with-adolc-dir=DIR], [adolc root directory.]),
-		[ADOLC_ROOT=$withval],[ADOLC_ROOT="no"])
-
-	dnl Check whether adolc is enabled
-	AC_MSG_CHECKING([for adolc])
-	if test "x$ADOLC_ROOT" = "xno" ; then
-		HAVE_ADOLC=no
-	else
-		HAVE_ADOLC=yes
-		if ! test -d "$ADOLC_ROOT"; then
-			AC_MSG_ERROR([adolc directory provided ($ADOLC_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_ADOLC)
-
-	dnl adolc headers and libraries
-	if test "x$HAVE_ADOLC" == "xyes"; then
-		ADOLCINCL="-I$ADOLC_ROOT/include"
-		dnl ADOLCLIB="-L$ADOLC_ROOT/lib64 -ladolc" used to be the path
-		ADOLCLIB="-L$ADOLC_ROOT/lib -ladolc"
-		AC_DEFINE([_HAVE_ADOLC_],[1],[with adolc in ISSM src])
-		AC_DEFINE([_HAVE_AD_],[1],[with AD in ISSM src])
-		AC_SUBST([ADOLCINCL])
-		AC_SUBST([ADOLCLIB])
-	fi
-	AM_CONDITIONAL([ADOLC], [test x$HAVE_ADOLC = xyes])
-   AM_COND_IF(ADOLC,[CXXFLAGS+=" -std=c++11"])
-	dnl }}}
-	dnl adolc-version{{{
-	AC_ARG_WITH([adolc-version],
-		AS_HELP_STRING([--with-adolc-version=number], [adolc version.]),
-		[ADOLC_VERSION=$withval],[ADOLC_VERSION=2])
-	AC_MSG_CHECKING(for adolc-version)
-
-	AC_DEFINE_UNQUOTED([_ADOLC_VERSION_],$ADOLC_VERSION,[ADOLC version])
-	AC_MSG_RESULT($ADOLC_VERSION)
-	dnl }}}
-	dnl adic2{{{
-	AC_ARG_WITH([adic2-dir],
-	  AS_HELP_STRING([--with-adic2-dir=DIR], [adic2 root directory.]),
-	  [ADIC2_ROOT=$withval],[ADIC2_ROOT="no"])
-
-	dnl Check whether adic2 is enabled
-	AC_MSG_CHECKING([for adic2])
-	if test "x$ADIC2_ROOT" = "xno" ; then
-		HAVE_ADIC2=no
-	else
-		HAVE_ADIC2=yes
-		if ! test -d "$ADIC2_ROOT"; then
-			AC_MSG_ERROR([adic2 directory provided ($ADIC2_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_ADIC2)
-
-	dnl adic2 headers and libraries
-	if test "x$HAVE_ADIC2" == "xyes"; then
-		ADIC2INCL="-DADIC2_DENSE -I$ADIC2_ROOT/include -I$ADIC2_ROOT/share/runtime_dense/"
-		ADIC2LIB=""
-		AC_DEFINE([_HAVE_ADIC2_],[1],[with adic2 in ISSM src])
-		AC_SUBST([ADIC2INCL])
-		AC_SUBST([ADIC2LIB])
-	fi
-	AM_CONDITIONAL([ADIC2], [test x$HAVE_ADIC2 = xyes])
-	dnl }}}
-	dnl atlas{{{
-	AC_ARG_WITH([atlas-dir],
-	  AS_HELP_STRING([--with-atlas-dir=DIR],[atlas root directory]),
-	  [ATLAS_ROOT=$withval],[ATLAS_ROOT="no"])
-
-	dnl Check whether atlas is enabled
-	AC_MSG_CHECKING(for atlas and cblas libraries)
-	if test "x$ATLAS_ROOT" = "xno" ; then
-		HAVE_ATLAS=no
-	else
-		HAVE_ATLAS=yes
-		if ! test -d "$ATLAS_ROOT"; then
-			AC_MSG_ERROR([atlas directory provided ($ATLAS_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_ATLAS)
-
-	dnl atlas headers and libraries
-	if test "x$HAVE_ATLAS" == "xyes"; then
-		dnl: branch on whether we are running on windows or linux.
-		case "${host_os}" in
-			*cygwin*)
-			ATLASLIB="-L`cygpath -m $ATLAS_ROOT` -Wl,libatlas.lib  -Wl,libcblas.lib"
-			;;
-			*linux*)
-			ATLASLIB=-L"$ATLAS_ROOT/lib -lcblas -latlas -lm "
-			;;
-			*darwin*)
-			ATLASLIB=-L"$ATLAS_ROOT/lib -lcblas -latlas -lm"
-			;;
-		esac
-		AC_DEFINE([_HAVE_ATLAS_],[1],[with ATLAS in ISSM src])
-		AC_SUBST([ATLASLIB])
-	fi
-	dnl }}}
-	dnl gsl{{{
-	AC_ARG_WITH([gsl-dir],
-	  AS_HELP_STRING([--with-gsl-dir=DIR], [gsl root directory.]),
-	  [GSL_ROOT=$withval],[GSL_ROOT="no"])
-
-	dnl Check whether gsl is enabled
-	AC_MSG_CHECKING([for gsl])
-	if test "x$GSL_ROOT" = "xno" ; then
-		HAVE_GSL=no
-	else
-		HAVE_GSL=yes
-		if ! test -d "$GSL_ROOT"; then
-			AC_MSG_ERROR([gsl directory provided ($GSL_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_GSL)
-
-	dnl gsl headers and libraries
-	if test "x$HAVE_GSL" == "xyes"; then
-		GSLINCL="-I$GSL_ROOT/include"
-		if test "x$HAVE_ATLAS" = "xyes" ; then
-			GSLLIB="-dy -L$GSL_ROOT/lib -lgsl -L$ATLAS_ROOT/lib -lcblas -latlas -lm"
-		else
-			GSLLIB="-L$GSL_ROOT/lib -lgsl -lgslcblas -lm"
-		fi
-		AC_DEFINE([_HAVE_GSL_],[1],[with gsl in ISSM src])
-		AC_SUBST([GSLINCL])
-		AC_SUBST([GSLLIB])
-	fi
-	AM_CONDITIONAL([GSL], [test x$HAVE_GSL = xyes])
-	dnl }}}
-	dnl ampi (ADOLC){{{
-	AC_ARG_WITH([ampi-dir],
-	  AS_HELP_STRING([--with-ampi-dir=DIR], [adjoinable mpi root directory.]),
-	  [AMPI_ROOT=$withval],[AMPI_ROOT="no"])
-
-	dnl Check whether ampi is enabled
-	AC_MSG_CHECKING([for ampi])
-	if test "x$AMPI_ROOT" = "xno" ; then
-		HAVE_AMPI=no
-	else
-		HAVE_AMPI=yes
-		if ! test -d "$AMPI_ROOT"; then
-			AC_MSG_ERROR([ampi directory provided ($AMPI_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_AMPI)
-
-	dnl ampi headers and libraries
-	if test "x$HAVE_AMPI" == "xyes"; then
-		AMPIINCL="-I$AMPI_ROOT/include"
-		if test "x$ADOLC_ROOT" == "xno"; then
-			AC_MSG_ERROR([cannot run adjoinable mpi without adolc]);
-		fi
-		dnl AMPILIB="-dy -L$AMPI_ROOT/lib -lampiCommon -L$ADOLC_ROOT/lib -ladolc -L$AMPI_ROOT/lib -lampiCommon -lampiBookkeeping -lampiTape"
-		dnl AMPILIB="-dy -L$AMPI_ROOT/lib  -L$ADOLC_ROOT/lib -Wl,--start-group,-lampiCommon,-ladolc,-lampiCommon,-lampiBookkeeping,-lampiTape,-lampiPlainC,-lampiADtoolStubsST,--end-group"
-		dnl AMPILIB="-L$AMPI_ROOT/lib  -L$ADOLC_ROOT/lib -Wl,--start-group -lampiCommon -ladolc -lampiCommon -lampiBookkeeping -lampiTape -lampiPlainC -lampiADtoolStubsST -Wl,--end-group"
-		dnl AMPILIB="$AMPI_ROOT/lib/libampiCommon.so $ADOLC_ROOT/lib/libadolc.so  $AMPI_ROOT/lib/libampiCommon.so $AMPI_ROOT/lib/libampiBookkeeping.so $AMPI_ROOT/lib/libampiTape.so $AMPI_ROOT/lib/libampiPlainC.so  $AMPI_ROOT/lib/libampiADtoolStubsST.so"
-		dnl AMPILIB="-dy -L$AMPI_ROOT/lib  -L$ADOLC_ROOT/lib -lampiCommon -ladolc -lampiCommon -lampiBookkeeping -lampiTape -lampiPlainC -lampiADtoolStubsST"
-		AMPILIB="-dy -L$AMPI_ROOT/lib  -lampiCommon -lampiBookkeeping -lampiTape"
-		AC_DEFINE([_HAVE_AMPI_],[1],[with adjoinable mpi in ISSM src])
-		AC_SUBST([AMPIINCL])
-		AC_SUBST([AMPILIB])
-	fi
-	AM_CONDITIONAL([AMPI], [test x$HAVE_AMPI = xyes])
-	dnl }}}
-	dnl adjointmpi (CoDiPack){{{
-	AC_ARG_WITH([adjointmpi-dir],
-	  AS_HELP_STRING([--with-adjointmpi-dir=DIR], [adjoinable mpi root directory.]),
-	  [ADJOINTMPI_ROOT=$withval],[ADJOINTMPI_ROOT="no"])
-
-	dnl Check whether adjointmpi is enabled
-	AC_MSG_CHECKING([for adjointmpi])
-	if test "x$ADJOINTMPI_ROOT" = "xno" ; then
-		HAVE_ADJOINTMPI=no
-	else
-		HAVE_ADJOINTMPI=yes
-		if ! test -d "$ADJOINTMPI_ROOT"; then
-			AC_MSG_ERROR([adjointmpi directory provided ($ADJOINTMPI_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_ADJOINTMPI)
-
-	dnl adjointmpi headers and libraries
-	if test "x$HAVE_ADJOINTMPI" == "xyes"; then
-		if test "x$CODIPACK_ROOT" == "xno"; then
-			AC_MSG_ERROR([cannot run adjoint mpi without CoDiPack]);
-		fi
-		ADJOINTMPIINCL="-I$ADJOINTMPI_ROOT/include"
-		ADJOINTMPILIB="-L$ADJOINTMPI_ROOT/lib  -lAMPI"
-		dnl Also set _HAVE_AMPI_, because the interface is (almost) the
-		dnl same as for adjoinable mpi...
-		AC_DEFINE([_HAVE_AMPI_],[1],[with adjoint mpi in ISSM src])
-		AC_DEFINE([_HAVE_ADJOINTMPI_],[1],[with adjoint mpi in ISSM src])
-		AC_SUBST([ADJOINTMPIINCL])
-		AC_SUBST([ADJOINTMPILIB])
-	fi
-	AM_CONDITIONAL([ADJOINTMPI], [test x$HAVE_ADJOINTMPI = xyes])
-	dnl }}}
-	dnl medipack (CoDiPack, ADOLC dev){{{
-	AC_ARG_WITH([medipack-dir],
-	  AS_HELP_STRING([--with-medipack-dir=DIR], [MeDiPack root directory.]),
-	  [MEDIPACK_ROOT=$withval],[MEDIPACK_ROOT="no"])
-
-	dnl Check whether medipack is enabled
-	AC_MSG_CHECKING([for medipack])
-	if test "x$MEDIPACK_ROOT" = "xno" ; then
-		HAVE_MEDIPACK=no
-	else
-		HAVE_MEDIPACK=yes
-		if ! test -d "$MEDIPACK_ROOT"; then
-			AC_MSG_ERROR([medipack directory provided ($MEDIPACK_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_MEDIPACK)
-
-	dnl medipack headers and libraries
-	if test "x$HAVE_MEDIPACK" == "xyes"; then
-		if test "x$CODIPACK_ROOT" == "xno"; then
-			AC_MSG_ERROR([cannot run MeDiPack without CoDiPack]);
-		fi
-		MEDIPACKINCL="-I$MEDIPACK_ROOT/include -I$MEDIPACK_ROOT/src"
-		dnl Also set _HAVE_AMPI_, because the interface is (almost) the
-		dnl same as for adjoinable mpi...
-		AC_DEFINE([_HAVE_AMPI_],[1],[with adjoint mpi in ISSM src])
-		AC_DEFINE([_HAVE_MEDIPACK_],[1],[with MeDiPack in ISSM src])
-		AC_SUBST([MEDIPACKINCL])
-	fi
-	AM_CONDITIONAL([MEDIPACK], [test x$HAVE_MEDIPACK = xyes])
-
-	dnl }}}
-	dnl rose{{{
-	AC_ARG_WITH([rose-dir],
-	  AS_HELP_STRING([--with-rose-dir=DIR], [rose root directory.]),
-	  [ROSE_ROOT=$withval],[ROSE_ROOT="no"])
-
-	dnl Check whether rose is enabled
-	AC_MSG_CHECKING([for rose])
-	if test "x$ROSE_ROOT" = "xno" ; then
-		HAVE_ROSE=no
-	else
-		HAVE_ROSE=yes
-		if ! test -d "$ROSE_ROOT"; then
-			AC_MSG_ERROR([rose directory provided ($ROSE_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_ROSE)
-	AM_CONDITIONAL([ROSE],[test x$HAVE_ROSE = xyes])
-
-	dnl library and header files
-	if test "x$HAVE_ROSE" = "xyes"; then
-		ROSEINCL="-I$ROSE_ROOT/include"
-		ROSELIB=""
-		AC_DEFINE([_HAVE_ROSE_],[1],[with rose in ISSM src])
-		AC_SUBST([ROSEINCL])
-		AC_SUBST([ROSELIB])
-	fi
-	dnl }}}
-	dnl mpi{{{
-	AC_MSG_CHECKING(for mpi)
-
-	AC_ARG_WITH([mpi-include],
-	  AS_HELP_STRING([--with-mpi-include=DIR],[mpi include directory, necessary for parallel build]),
-	  [MPI_INCLUDE=$withval],[MPI_INCLUDE=""])
-
-	AC_ARG_WITH([mpi-libdir],
-	  AS_HELP_STRING([--with-mpi-libdir=DIR],[mpi lib directory, necessary for parallel build]),
-	  [MPI_LIBDIR=$withval],[MPI_LIBDIR=""])
-
-	AC_ARG_WITH([mpi-libflags],
-	  AS_HELP_STRING([--with-mpi-libflags=LIBS],[mpi libraries to be used, necessary for parallel build]),
-	  [MPI_LIBFLAGS=$withval],[MPI_LIBFLAGS=""])
-
-
-	if test -z "$MPI_INCLUDE" ; then
-		HAVE_MPI=no
-	else
-		HAVE_MPI=yes
-
-		dnl Processing for windows
-		if  test x$VENDOR = xintel-win7-32; then
-			MPI_LIBDIR=`cygpath -m $MPI_LIBDIR`
-			MPI_INCLUDE=`cygpath -m $MPI_INCLUDE`
-		elif test x$VENDOR = xintel-win7-64; then
-			MPI_LIBDIR="/I`cygpath -m $MPI_LIBDIR`"
-			MPI_INCLUDE=`cygpath -m $MPI_INCLUDE`
-		elif test x$VENDOR = xMSVC-Win64 || test x$VENDOR = xMSVC-Win64-par; then
-			MPI_LIBDIR=`cygpath -m $MPI_LIBDIR`
-			MPI_INCLUDE=`cygpath -m $MPI_INCLUDE`
-		fi
-
-		if test -z "$MPI_LIBDIR"; then
-			MPILIB="$MPI_LIBFLAGS"
-		else
-			MPILIB="-L$MPI_LIBDIR $MPI_LIBFLAGS"
-		fi
-
-		if  test x$IS_WINDOWS = xyes; then
-			MPIINCL=/I"$MPI_INCLUDE"
-		else
-			MPIINCL=-I"$MPI_INCLUDE"
-		fi
-
-		AC_DEFINE([_HAVE_MPI_],[1],[with Mpi in ISSM src])
-		AC_DEFINE([HAVE_MPI],[1],[Mpi Flag for Dakota (DO NOT REMOVE)])
-		AC_SUBST([MPIINCL])
-		AC_SUBST([MPILIB])
-	fi
-	AM_CONDITIONAL([MPI], [test x$HAVE_MPI = xyes])
-	AC_MSG_RESULT($HAVE_MPI)
-	dnl }}}
-	dnl petsc{{{
-	AC_ARG_WITH([petsc-dir],
-	  AS_HELP_STRING([--with-petsc-dir=DIR],[PETSc root directory, necessary for parallel build]),
-	  [PETSC_ROOT=$withval],[PETSC_ROOT="no"])
-
-	dnl Check whether petsc is enabled
-	AC_MSG_CHECKING([for petsc])
-	if test "x$PETSC_ROOT" = "xno" ; then
-		HAVE_PETSC=no
-	else
-		HAVE_PETSC=yes
-		if ! test -d "$PETSC_ROOT"; then
-			AC_MSG_ERROR([petsc directory provided ($PETSC_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_PETSC)
-	AM_CONDITIONAL([PETSC],[test x$HAVE_PETSC = xyes])
-
-	dnl library and header files
-	if test "x$HAVE_PETSC" = "xyes"; then
-		AC_MSG_CHECKING(for petsc version)
-	   if ! test -f "$PETSC_ROOT/include/petscversion.h"; then
-			AC_MSG_ERROR([PETSc not instaled corretly: file ($PETSC_ROOT/include/petscversion.h) does not exist]);
-		fi
-		PETSC_MAJOR=`cat $PETSC_ROOT/include/petscversion.h | grep "#define PETSC_VERSION_MAJOR" | sed 's/#define PETSC_VERSION_MAJOR//' | sed 's/ //g'`
-		PETSC_MINOR=`cat $PETSC_ROOT/include/petscversion.h | grep "#define PETSC_VERSION_MINOR" | sed 's/#define PETSC_VERSION_MINOR//' | sed 's/ //g'`
-		AC_DEFINE_UNQUOTED([_PETSC_MAJOR_],$PETSC_MAJOR,[PETSc version major])
-		AC_DEFINE_UNQUOTED([_PETSC_MINOR_],$PETSC_MINOR,[PETSc version minor])
-		AC_MSG_RESULT($PETSC_MAJOR.$PETSC_MINOR)
-
-		PETSC_VERSION_DATE_HG=`cat $PETSC_ROOT/include/petscversion.h | grep "#define PETSC_VERSION_DATE_HG" | sed 's/#define PETSC_VERSION_DATE_HG//' | sed 's/ //g' | sed -e 's/\"//g' `
-		PETSC_RELEASE=`cat $PETSC_ROOT/include/petscversion.h | grep "#define PETSC_VERSION_RELEASE" | sed 's/#define PETSC_VERSION_RELEASE//' | sed 's/ //g'`
-
-		AC_MSG_CHECKING(whether petsc is the development version)
-		dnl if test x$PETSC_VERSION_DATE_HG = xunknown; then
-		if test "$PETSC_RELEASE" = "0"; then
-		   AC_DEFINE([_HAVE_PETSCDEV_],[1],[with PETSc-dev])
-			AC_MSG_RESULT(yes)
-		else
-			AC_MSG_RESULT(no)
-		fi
-
-		AC_ARG_WITH([petsc-arch],
-		  AS_HELP_STRING([--with-petsc-arch=DIR],[PETSc arch, necessary for PETSc < 3.0]),
-		  [PETSC_ARCH=$withval],[PETSC_ARCH=""])
-
-		AC_MSG_CHECKING(for petsc headers and libraries in $PETSC_ROOT)
-		dnl To ge PETSc's libraries:
-		dnl cd externalpackages/petsc/src
-		dnl make getlinklibs
-		PETSCINCL=" -I$PETSC_ROOT/include"
-		dnl Add other location (not needed anymore since at least PETSc 3.0)
-		if test "x$PETSC_ARCH" != "x" && test -d "$PETSC_ROOT/$PETSC_ARCH/include"; then
-		 PETSCINCL+=" $PETSC_ROOT/$PETSC_ARCH/include"
-		fi
-		if test "x$PETSC_ARCH" != "x" && test -d "$PETSC_ROOT/include/$PETSC_ARCH"; then
-		 PETSCINCL+=" $PETSC_ROOT/include/$PETSC_ARCH"
-		fi
-
-		case "${host_os}" in
-				*cygwin*)
-				if test $PETSC_MAJOR -lt 3 ; then
-					PETSCLIB=-Wl,/LIBPATH:`cygpath -w $PETSC_ROOT/lib`  -Wl,libpetscksp.lib  -Wl,libpetscdm.lib  -Wl,libpetscmat.lib  -Wl,libpetscvec.lib  -Wl,libpetscsnes.lib  -Wl,libpetscts.lib  -Wl,libmpiuni.lib  -Wl,libpetsc.lib
-				else
-					PETSCLIB="/link -Wl,/LIBPATH:`cygpath -m $PETSC_ROOT/lib`  -Wl,libpetsc.lib"
-					PETSCINCL="/I`cygpath -m $PETSC_ROOT/include`"
-				fi
-				;;
-				*linux*)
-				if test $PETSC_MAJOR -lt 3 ; then
-					PETSCLIB="-L$PETSC_ROOT/lib -lpetscksp -lpetscdm -lpetscmat -lpetscvec -lpetscsnes -lpetscts -lmpiuni -lpetsc"
-				else
-					PETSCLIB="-L$PETSC_ROOT/lib -lpetsc -ldl"
-					if test $PETSC_MAJOR -gt 3 || test $PETSC_MINOR -ge 3; then PETSCLIB+=" -lparmetis -lmetis"; fi
-				fi
-				if test "x$host_os_version" = "x3.0.101-0.31.1_1.0502.8394-cray_gem_s" ; then
-					PETSCLIB="-L$PETSC_ROOT/lib -lcraypetsc_gnu_real -lmetis"
-				fi
-				;;
-				*darwin*)
-				if test $PETSC_MAJOR -lt 3 ; then
-					PETSCLIB="-L$PETSC_ROOT/lib -lpetscksp -lpetscdm -lpetscmat -lpetscvec -lpetscsnes -lpetscts -lpetsc"
-				else
-					PETSCLIB="-L$PETSC_ROOT/lib -lpetsc"
-					if test $PETSC_MAJOR -gt 3 || test $PETSC_MINOR -ge 3; then PETSCLIB+=" -lmetis"; fi
-				fi
-				;;
-		esac
-		AC_MSG_RESULT(done)
-		AC_DEFINE([_HAVE_PETSC_],[1],[with PETSc in ISSM src])
-		AC_SUBST([PETSCINCL])
-		AC_SUBST([PETSCLIB])
-	fi
-	dnl }}}
-	dnl metis{{{
-	if test "$HAVE_PETSC" = "yes" && test "x$PETSC_MAJOR" = "x3" && test $PETSC_MINOR -ge 3 && test "x$VENDOR" != "xMSVC-Win64" && test "x$VENDOR" != "xMSVC-Win64-par"; then
-		dnl in petsc >=3.3, metis is provided
+	dnl METIS{{{
+
+	dnl NOTE: We know $VENDOR cannot be empty at this point, so no need to
+	dnl		  check again in the following conditionals
+	dnl
+	if test "x${HAVE_PETSC}" == "xyes" && test "x${PETSC_MAJOR}" == "x3" && test ${PETSC_MINOR} -ge 3 && test "${VENDOR}" != "MSVC-Win64" && test "${VENDOR}" != "MSVC-Win64-par"; then
+		dnl In PETSc >=3.3, METIS is provided
 		HAVE_METIS="yes"
-		AC_DEFINE([_METIS_VERSION_],[5],[ Metis version number])
-		AC_DEFINE([_HAVE_METIS_],[1],[with Metis in ISSM src])
-	else
-		AC_ARG_WITH([metis-dir],
-		  AS_HELP_STRING([--with-metis-dir=DIR],[metis root directory. necessary for serial build]),
-		  [METIS_ROOT=$withval],[METIS_ROOT="no"])
-
-		dnl Check whether metis is enabled
-		AC_MSG_CHECKING([for metis])
-		if test "x$METIS_ROOT" = "xno" ; then
+		AC_DEFINE([_METIS_VERSION_], [5], [METIS version number])
+		AC_DEFINE([_HAVE_METIS_], [1], [with METIS in ISSM src])
+	else
+		AC_ARG_WITH(
+			[metis-dir],
+			AS_HELP_STRING([--with-metis-dir=DIR], [METIS root directory, necessary for serial build]),
+			[METIS_ROOT=${withval}],
+			[METIS_ROOT="no"]
+		)
+		AC_MSG_CHECKING([for METIS])
+		if test "x${METIS_ROOT}" == "xno"; then
 			HAVE_METIS=no
 		else
 			HAVE_METIS=yes
-			if ! test -d "$METIS_ROOT"; then
-				AC_MSG_ERROR([metis directory provided ($METIS_ROOT) does not exist]);
+			if ! test -d "${METIS_ROOT}"; then
+				AC_MSG_ERROR([METIS directory provided (${METIS_ROOT}) does not exist!]);
 			fi
 		fi
-		AC_MSG_RESULT($HAVE_METIS)
-		AM_CONDITIONAL([METIS],[test x$HAVE_METIS = xyes])
-
-		dnl library and header files
-		if test "x$HAVE_METIS" = "xyes"; then
-
-			AC_MSG_CHECKING(for metis headers and libraries in $METIS_ROOT)
-			dnl first figure out version of metis: does the VERSION file exist?
-			if test -e "$METIS_ROOT/VERSION"; then
+		AC_MSG_RESULT([${HAVE_METIS}])
+		AM_CONDITIONAL([METIS], [test "x${HAVE_METIS}" == "xyes"])
+
+		dnl METIS libraries and header files
+		if test "x${HAVE_METIS}" == "xyes"; then
+			dnl Retrieve METIS version (does the VERSION file exist?)
+			if test -f "${METIS_ROOT}/VERSION"; then
 				METIS_VERSION=4
 			else
@@ -1256,152 +1371,155 @@
 			fi
 
-			if test "$METIS_VERSION" = "4" ; then
-				METISINCL=-I"$METIS_ROOT/Lib"
+			if test "x${METIS_VERSION}" == "x4"; then
+				METISINCL="-I${METIS_ROOT}/Lib"
 				case "${host_os}" in
 					*cygwin*)
-					METISINCL="/I`cygpath -m $METIS_ROOT/Lib`"
-					METISLIB="-Wl,/link -Wl,/LIBPATH:`cygpath -m $METIS_ROOT` -Wl,libmetis.lib"
+						METISINCL="/I`cygpath -m ${METIS_ROOT}/Lib`"
+						METISLIB="-Wl,/link -Wl,/LIBPATH:`cygpath -m ${METIS_ROOT}` -Wl,libmetis.lib"
 					;;
 					*linux*)
-					METISLIB=-L"$METIS_ROOT/ -lmetis"
+						METISLIB="-L${METIS_ROOT} -lparmetis -lmetis"
 					;;
 					*darwin*)
-					METISLIB=-L"$METIS_ROOT/ -lmetis"
+						METISLIB="-L${METIS_ROOT} -lparmetis -lmetis"
 					;;
 				esac
-				AC_DEFINE([_METIS_VERSION_],[4],[ Metis version number])
+				AC_DEFINE([_METIS_VERSION_], [4], [ METIS version number])
 			fi
 
-			if test "$METIS_VERSION" = "5" ; then
+			if test "x${METIS_VERSION}" == "x5"; then
 				case "${host_os}" in
 					*cygwin*)
-					METISLIB="-L$METIS_ROOT libmetis.lib"
+						METISLIB="-L${METIS_ROOT} libmetis.lib"
 					;;
 					*linux*)
-					METISLIB=-L"$METIS_ROOT/lib -lmetis"
+						METISLIB="-L${METIS_ROOT}/lib -lparmetis -lmetis"
 					;;
 					*darwin*)
-					METISLIB=-L"$METIS_ROOT/lib -lmetis"
+						METISLIB="-L${METIS_ROOT}/lib -lparmetis -lmetis"
 					;;
 				esac
-				METISINCL=-I"$METIS_ROOT/include"
-				AC_DEFINE([_METIS_VERSION_],[5],[ Metis version number])
+				METISINCL="-I${METIS_ROOT}/include"
+				AC_DEFINE([_METIS_VERSION_], [5], [METIS version number])
 			fi
 
-			AC_DEFINE([_HAVE_METIS_],[1],[with Metis in ISSM src])
+			AC_DEFINE([_HAVE_METIS_], [1], [with METIS in ISSM src])
 			AC_SUBST([METISINCL])
 			AC_SUBST([METISLIB])
 		fi
 	fi
-	AM_CONDITIONAL([METIS],[test x$HAVE_METIS = xyes])
-	dnl }}}
-	dnl tao{{{
-	AC_ARG_WITH([tao-dir],
-		AS_HELP_STRING([--with-tao-dir=DIR], [tao root directory.]),
-		[TAO_ROOT=$withval],[TAO_ROOT="no"])
-
-	dnl Check whether tao is enabled
-	AC_MSG_CHECKING([for tao])
-
-	if test "$HAVE_PETSC" = "yes" && test "x$PETSC_MAJOR" = "x3" && test $PETSC_MINOR -ge 5; then
-		dnl in petsc >=3.5, tao is provided
+	AM_CONDITIONAL([METIS], [test "x${HAVE_METIS}" == "xyes"])
+	dnl }}}
+	dnl Toolkit for Advanced Optimization (TAO){{{
+	AC_ARG_WITH(
+		[tao-dir],
+		AS_HELP_STRING([--with-tao-dir=DIR], [TAO root directory]),
+		[TAO_ROOT=${withval}],
+		[TAO_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for TAO])
+
+	if test "x${HAVE_PETSC}" == "xyes" && test "x${PETSC_MAJOR}" == "x3" && test ${PETSC_MINOR} -ge 5; then
+		dnl In PETSc >= 3.5, TAO is provided
 		HAVE_TAO="yes"
-		AC_DEFINE([_HAVE_TAO_],[1],[with Tao in ISSM src])
-		AC_MSG_RESULT($HAVE_TAO)
-	else
-
-		if test "x$TAO_ROOT" = "xno" ; then
+		AC_DEFINE([_HAVE_TAO_], [1], [with TAO in ISSM src])
+		AC_MSG_RESULT([${HAVE_TAO}])
+	else
+		if test "x${TAO_ROOT}" == "xno"; then
 			HAVE_TAO=no
 		else
 			HAVE_TAO=yes
-			if ! test -d "$TAO_ROOT"; then
-				AC_MSG_ERROR([tao directory provided ($TAO_ROOT) does not exist]);
+			if ! test -d "${TAO_ROOT}"; then
+				AC_MSG_ERROR([TAO directory provided (${TAO_ROOT}) does not exist!]);
 			fi
 		fi
-		AC_MSG_RESULT($HAVE_TAO)
-
-		dnl tao headers and libraries
-		if test "x$HAVE_TAO" == "xyes"; then
-		  TAOINCL="-I$TAO_ROOT/ -I$TAO_ROOT/include -I$TAO_ROOT/bmake/ "
-		  TAOLIB="-L$TAO_ROOT/lib -ltao -lpetsc"
-		  AC_DEFINE([_HAVE_TAO_],[1],[with Tao in ISSM src])
-		  AC_SUBST([TAOINCL])
-		  AC_SUBST([TAOLIB])
-		fi
-	fi
-	dnl }}}
-	dnl m1qn3{{{
-	AC_ARG_WITH([m1qn3-dir],
-		AS_HELP_STRING([--with-m1qn3-dir=DIR], [m1qn3 root directory.]),
-		[M1QN3_ROOT=$withval],[M1QN3_ROOT="no"])
-
-	dnl Check whether m1qn3 is enabled
-	AC_MSG_CHECKING([for m1qn3])
-	if test "x$M1QN3_ROOT" = "xno" ; then
+		AC_MSG_RESULT([${HAVE_TAO}])
+
+		dnl TAO libraries and header files
+		if test "x${HAVE_TAO}" == "xyes"; then
+			TAOINCL="-I${TAO_ROOT} -I${TAO_ROOT}/include -I${TAO_ROOT}/bmake"
+			TAOLIB="-L${TAO_ROOT}/lib -ltao -lpetsc"
+			AC_DEFINE([_HAVE_TAO_], [1], [with Tao in ISSM src])
+			AC_SUBST([TAOINCL])
+			AC_SUBST([TAOLIB])
+		fi
+	fi
+	dnl }}}
+	dnl M1QN3{{{
+	AC_ARG_WITH(
+		[m1qn3-dir],
+		AS_HELP_STRING([--with-m1qn3-dir=DIR], [M1QN3 root directory]),
+		[M1QN3_ROOT=${withval}],
+		[M1QN3_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for M1QN3])
+	if test "x${M1QN3_ROOT}" == "xno"; then
 		HAVE_M1QN3=no
 	else
 		HAVE_M1QN3=yes
-		if ! test -d "$M1QN3_ROOT"; then
-			AC_MSG_ERROR([m1qn3 directory provided ($M1QN3_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_M1QN3)
-
-	dnl m1qn3 headers and libraries
-	if test "x$HAVE_M1QN3" == "xyes"; then
-	  M1QN3LIB="$M1QN3_ROOT/libm1qn3.a $M1QN3_ROOT/libddot.a"
-	  AC_DEFINE([_HAVE_M1QN3_],[1],[with M1QN3 in ISSM src])
-	  AC_SUBST([M1QN3LIB])
-	fi
-	dnl }}}
-	dnl proj.4{{{
-	AC_ARG_WITH([proj4-dir],
-		AS_HELP_STRING([--with-proj4-dir=DIR], [proj4 root directory.]),
-		[PROJ4_ROOT=$withval],[PROJ4_ROOT="no"])
-
-	dnl Check whether proj4 is enabled
-	AC_MSG_CHECKING([for proj.4])
-	if test "x$PROJ4_ROOT" = "xno" ; then
+		if ! test -d "${M1QN3_ROOT}"; then
+			AC_MSG_ERROR([M1QN3 directory provided (${M1QN3_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_M1QN3}])
+
+	dnl M1QN3 libraries and header files
+	if test "x${HAVE_M1QN3}" == "xyes"; then
+		M1QN3LIB="${M1QN3_ROOT}/libm1qn3.a ${M1QN3_ROOT}/libddot.a"
+		AC_DEFINE([_HAVE_M1QN3_], [1], [with M1QN3 in ISSM src])
+		AC_SUBST([M1QN3LIB])
+	fi
+	dnl }}}
+	dnl PROJ.4{{{
+	AC_ARG_WITH(
+		[proj4-dir],
+		AS_HELP_STRING([--with-proj4-dir=DIR], [PROJ.4 root directory]),
+		[PROJ4_ROOT=${withval}],
+		[PROJ4_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for PROJ.4])
+	if test "x${PROJ4_ROOT}" == "xno"; then
 		HAVE_PROJ4=no
 	else
 		HAVE_PROJ4=yes
-		if ! test -d "$PROJ4_ROOT"; then
-			AC_MSG_ERROR([proj4 directory provided ($PROJ4_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_PROJ4)
-
-	dnl proj4 headers and libraries
-	if test "x$HAVE_PROJ4" == "xyes"; then
-	  PROJ4INCL="-I$PROJ4_ROOT/include "
-	  PROJ4LIB="-L$PROJ4_ROOT/lib -lproj"
-	  AC_DEFINE([_HAVE_PROJ4_],[1],[with PROJ4 in ISSM src])
-	  AC_SUBST([PROJ4INCL])
-	  AC_SUBST([PROJ4LIB])
-	fi
-	AM_CONDITIONAL([PROJ4],[test x$HAVE_PROJ4 = xyes])
-	dnl }}}
-	dnl slepc{{{
-	AC_ARG_WITH([slepc-dir],
-	  AS_HELP_STRING([--with-slepc-dir=DIR],[slepc root directory]),
-	  [SLEPC_ROOT=$withval],[SLEPC_ROOT="no"])
-
-	dnl Check whether slepc is enabled
-	AC_MSG_CHECKING([for slepc])
-	if test "x$SLEPC_ROOT" = "xno" ; then
+		if ! test -d "${PROJ4_ROOT}"; then
+			AC_MSG_ERROR([PROJ.4 directory provided (${PROJ4_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_PROJ4}])
+
+	dnl PROJ.4 libraries and header files
+	if test "x${HAVE_PROJ4}" == "xyes"; then
+		PROJ4INCL="-I${PROJ4_ROOT}/include"
+		PROJ4LIB="-L${PROJ4_ROOT}/lib -lproj"
+		AC_DEFINE([_HAVE_PROJ4_], [1], [with PROJ4 in ISSM src])
+		AC_SUBST([PROJ4INCL])
+		AC_SUBST([PROJ4LIB])
+	fi
+	AM_CONDITIONAL([PROJ4], [test "x${HAVE_PROJ4}" == "xyes"])
+	dnl }}}
+	dnl SLEPc{{{
+	AC_ARG_WITH(
+		[slepc-dir],
+		AS_HELP_STRING([--with-slepc-dir=DIR], [SLEPc root directory]),
+		[SLEPC_ROOT=${withval}],
+		[SLEPC_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for SLEPc])
+	if test "x${SLEPC_ROOT}" == "xno"; then
 		HAVE_SLEPC=no
 	else
 		HAVE_SLEPC=yes
-		if ! test -d "$SLEPC_ROOT"; then
-			AC_MSG_ERROR([slepc directory provided ($SLEPC_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_SLEPC)
-
-	dnl slepc headers and libraries
-	if test "x$HAVE_SLEPC" == "xyes"; then
-		SLEPCINCL=-I"$SLEPC_ROOT/include"
-		SLEPCLIB=-L"$SLEPC_ROOT/lib/ -lslepc"
-		AC_DEFINE([_HAVE_SLEPC_],[1],[with Slepc in ISSM src])
+		if ! test -d "${SLEPC_ROOT}"; then
+			AC_MSG_ERROR([SLEPc directory provided (${SLEPC_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_SLEPC}])
+
+	dnl SLEPc libraries and header files
+	if test "x${HAVE_SLEPC}" == "xyes"; then
+		SLEPCINCL="-I${SLEPC_ROOT}/include"
+		SLEPCLIB="-L${SLEPC_ROOT}/lib -lslepc"
+		AC_DEFINE([_HAVE_SLEPC_], [1], [with SLEPc in ISSM src])
 		AC_SUBST([SLEPCINCL])
 		AC_SUBST([SLEPCLIB])
@@ -1409,142 +1527,168 @@
 	dnl }}}
 	dnl shapelib{{{
-	AC_ARG_WITH([shapelib-dir],
-	  AS_HELP_STRING([--with-shapelib-dir=DIR], [shapelib root directory]),
-	  [SHAPELIB_ROOT=$withval],[SHAPELIB_ROOT="no"])
-
-	dnl Check whether shapelib is enabled
+	AC_ARG_WITH(
+		[shapelib-dir],
+		AS_HELP_STRING([--with-shapelib-dir=DIR], [shapelib root directory]),
+		[SHAPELIB_ROOT=${withval}],
+		[SHAPELIB_ROOT="no"]
+	)
 	AC_MSG_CHECKING([for shapelib])
-	if test "x$SHAPELIB_ROOT" = "xno" ; then
+	if test "x${SHAPELIB_ROOT}" == "xno"; then
 		HAVE_SHAPELIB=no
 	else
 		HAVE_SHAPELIB=yes
-		if ! test -d "$SHAPELIB_ROOT"; then
-			AC_MSG_ERROR([shapelib directory provided ($SHAPELIB_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_SHAPELIB)
-
-	dnl shapelib headers and libraries
-	if test "x$HAVE_SHAPELIB" == "xyes"; then
-		SHAPELIBINCL=-I"$SHAPELIB_ROOT/include"
-		SHAPELIBLIB=-L"$SHAPELIB_ROOT/lib/ -lshape"
-		AC_DEFINE([_HAVE_SHAPELIB_],[1],[with Shapelib in ISSM src])
+		if ! test -d "${SHAPELIB_ROOT}"; then
+			AC_MSG_ERROR([shapelib directory provided (${SHAPELIB_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_SHAPELIB}])
+
+	dnl shapelib libraries and header files
+	if test "x${HAVE_SHAPELIB}" == "xyes"; then
+		SHAPELIBINCL="-I${SHAPELIB_ROOT}/include"
+		SHAPELIBLIB="-L${SHAPELIB_ROOT}/lib -lshape"
+		AC_DEFINE([_HAVE_SHAPELIB_], [1], [with shapelib in ISSM src])
 		AC_SUBST([SHAPELIBINCL])
 		AC_SUBST([SHAPELIBLIB])
 	fi
 	dnl }}}
-	dnl scalapack{{{
-
-	dnl Here, either just the directory is provided, or the library, we handle both cases
-	AC_ARG_WITH([scalapack-dir],
-	  AS_HELP_STRING([--with-scalapack-dir=DIR],[scalapack root directory]),
-	  [SCALAPACK_ROOT=$withval],[SCALAPACK_ROOT="no"])
-
-	AC_ARG_WITH([scalapack-lib],
-	  AS_HELP_STRING([--with-scalapack-lib=LIBS],[scalapack libraries to include]),
-	  [SCALAPACKLIB=$withval],[SCALAPACKLIB="no"])
-
-	dnl Check whether scalapack is enabled
-	AC_MSG_CHECKING([for scalapack])
-	if test "x$SCALAPACK_ROOT" = "xno" && test "x$SCALAPACKLIB" = "xno"; then
-			HAVE_SCALAPACK=no
-			SCALAPACKLIB=
-	else if test "x$SCALAPACK_ROOT" != "xno"; then
-		if ! test -d "$SCALAPACK_ROOT"; then
-		 AC_MSG_ERROR([scalapack directory provided ($SCALAPACK_ROOT) does not exist]);
+	dnl ScaLAPACK{{{
+
+	dnl NOTE: User should supply path to root directory or libraries, but not both
+	AC_ARG_WITH(
+		[scalapack-dir],
+		AS_HELP_STRING([--with-scalapack-dir=DIR], [ScaLAPACK root directory]),
+		[SCALAPACK_ROOT=${withval}],
+		[SCALAPACK_ROOT="no"]
+	)
+	AC_ARG_WITH(
+		[scalapack-lib],
+		AS_HELP_STRING([--with-scalapack-lib=LIBS], [ScaLAPACK libraries to link to]),
+		[SCALAPACKLIB=${withval}],
+		[SCALAPACKLIB="no"]
+	)
+	AC_MSG_CHECKING([for ScaLAPACK])
+	if test "x${SCALAPACK_ROOT}" == "xno" && test "x${SCALAPACKLIB}" == "xno"; then
+		HAVE_SCALAPACK=no
+		SCALAPACKLIB=""
+	elif test "x${SCALAPACK_ROOT}" != "xno"; then
+		if ! test -d "${SCALAPACK_ROOT}"; then
+			AC_MSG_ERROR([ScaLAPACK directory provided (${SCALAPACK_ROOT}) does not exist!]);
 		fi
 		HAVE_SCALAPACK=yes
-		#SCALAPACKLIB=-L"$SCALAPACK_ROOT/ -lscalapack-openmpi"
-		SCALAPACKLIB=-L"$SCALAPACK_ROOT/ -lscalapack"
-	else if test "x$SCALAPACKLIB" != "xno"; then
+		SCALAPACKLIB="-L${SCALAPACK_ROOT}/lib -lscalapack"
+	elif test "x${SCALAPACKLIB}" != "xno"; then
+		dnl Value of SCALAPACKLIB should be valid here, so no need to set it (as above)
 		HAVE_SCALAPACK=yes
 	else
-	   AC_MSG_ERROR([cannot provide both scalapack dir and scalapack-lib])
-	fi
-	fi
-	fi
-	AC_MSG_RESULT($HAVE_SCALAPACK)
-
-	dnl scalapack headers and libraries
-	if test "x$HAVE_SCALAPACK" = "xyes"; then
-		AC_DEFINE([_HAVE_SCALAPACK_],[1],[with Scalapack in ISSM src])
+		AC_MSG_ERROR([use --with-scalapack-dir or --with-scalapack-lib, but not both])
+	fi
+	AC_MSG_RESULT([${HAVE_SCALAPACK}])
+
+	dnl ScaLAPACK libraries and header files
+	if test "x${HAVE_SCALAPACK}" == "xyes"; then
+		AC_DEFINE([_HAVE_SCALAPACK_], [1], [with ScaLAPACK in ISSM src])
 		AC_SUBST([SCALAPACKLIB])
 	fi
 	dnl }}}
-	dnl blas-lapack{{{
-	AC_ARG_WITH([blas-lapack-dir],
-	  AS_HELP_STRING([--with-blas-lapack-dir=DIR],[blas-lapack root directory]),
-	  [BLASLAPACK_ROOT=$withval],[BLASLAPACK_ROOT="no"])
-
-	dnl Check whether blas-lapack is enabled
-	AC_MSG_CHECKING([for blas-lapack])
-	if test "x$BLASLAPACK_ROOT" = "xno" ; then
+	dnl BLAS/LAPACK{{{
+	AC_ARG_WITH(
+		[blas-lapack-dir],
+		AS_HELP_STRING([--with-blas-lapack-dir=DIR], [BLAS/LAPACK root directory]),
+		[BLASLAPACK_ROOT=$withval],
+		[BLASLAPACK_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for BLAS/LAPACK])
+	if test "x${BLASLAPACK_ROOT}" == "xno" ; then
 		HAVE_BLASLAPACK=no
 	else
 		HAVE_BLASLAPACK=yes
-		if ! test -d "$BLASLAPACK_ROOT"; then
-			AC_MSG_ERROR([blas-lapack directory provided ($BLASLAPACK_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_BLASLAPACK)
-
-	dnl blas-lapack headers and libraries
-	if test "x$HAVE_BLASLAPACK" == "xyes"; then
+		if ! test -d "${BLASLAPACK_ROOT}"; then
+			AC_MSG_ERROR([BLAS/LAPACK directory provided (${BLASLAPACK_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_BLASLAPACK}])
+
+	dnl BLAS/LAPACK libraries and header files
+	if test "x${HAVE_BLASLAPACK}" == "xyes"; then
 		BLASLAPACKINCL=""
-		dnl: branch on whether we are running on windows or linux.
 		case "${host_os}" in
-		*cygwin*)
-		  BLASLAPACKLIB="-L`cygpath -m $BLASLAPACK_ROOT` -Wl,libf2cblas.lib  -Wl,libf2clapack.lib"
-		  ;;
-		*linux*)
-				BLASLAPACKLIB=-L"$BLASLAPACK_ROOT/lib -lflapack -lfblas "
-		#BLASLAPACKLIB=-L"$BLASLAPACK_ROOT/lib -llapack -lblas "
-		  ;;
-		*darwin*)
-		  BLASLAPACKLIB=-L"$BLASLAPACK_ROOT/lib -lflapack -lfblas "
-		  ;;
+			*cygwin*)
+				BLASLAPACKLIB="-L`cygpath -m ${BLASLAPACK_ROOT}` -Wl,libf2cblas.lib  -Wl,libf2clapack.lib"
+			;;
+			*darwin*)
+				BLASLAPACKLIB="-L${BLASLAPACK_ROOT}/lib"
+				if ls ${BLASLAPACK_ROOT}/lib/libopenblas.* 1> /dev/null 2>&1; then
+					BLASLAPACKLIB="${BLASLAPACKLIB} -lopenblas"
+				elif ls ${BLASLAPACK_ROOT}/lib/libf2clapack.* 1> /dev/null 2>&1; then
+					BLASLAPACKLIB="${BLASLAPACKLIB} -lf2clapack -lf2cblas"
+				elif ls ${BLASLAPACK_ROOT}/lib/libflapack.* 1> /dev/null 2>&1; then
+					BLASLAPACKLIB="${BLASLAPACKLIB} -lflapack -lfblas"
+				else
+					BLASLAPACKLIB="${BLASLAPACKLIB} -llapack -lblas"
+				fi
+			;;
+			*linux*)
+				BLASLAPACKLIB="-L${BLASLAPACK_ROOT}/lib"
+				if ls ${BLASLAPACK_ROOT}/lib/libopenblas.* 1> /dev/null 2>&1; then
+					BLASLAPACKLIB="${BLASLAPACKLIB} -lopenblas"
+				elif ls ${BLASLAPACK_ROOT}/lib/libf2clapack.* 1> /dev/null 2>&1; then
+					BLASLAPACKLIB="${BLASLAPACKLIB} -lf2clapack -lf2cblas"
+				elif ls ${BLASLAPACK_ROOT}/lib/libflapack.* 1> /dev/null 2>&1; then
+					BLASLAPACKLIB="${BLASLAPACKLIB} -lflapack -lfblas"
+				else
+					BLASLAPACKLIB="${BLASLAPACKLIB} -llapack -lblas"
+				fi
+			;;
 		esac
-		AC_DEFINE([_HAVE_BLASLAPACK_],[1],[with blas lapack in ISSM src])
+		AC_DEFINE([_HAVE_BLASLAPACK_], [1], [with BLAS/LAPACK in ISSM src])
 		AC_SUBST([BLASLAPACKLIB])
 		AC_SUBST([BLASLAPACKINCL])
 	fi
 	dnl }}}
-	dnl mkl{{{
-		AC_ARG_WITH([mkl-libflags],
-					AS_HELP_STRING([--with-mkl-libflags=LIBS],[mkl libraries to be used]),
-					[MKL_LIBFLAGS=$withval],[MKL_LIBFLAGS="no"])
-
-		  dnl Check whether mkl is enabled
-		  AC_MSG_CHECKING([for mkl])
-		  if test "x$MKL_LIBFLAGS" = "xno" ; then
-				HAVE_MKL=no
-		  else
-				HAVE_MKL=yes
-				MKLLIB="$MKL_LIBFLAGS"
-				AC_DEFINE([_HAVE_MKL_],[1],[with mkl in ISSM src])
-				AC_SUBST([MKLLIB])
-				AC_SUBST([MKLINCL])
-			fi
-			AC_MSG_RESULT($HAVE_MKL)
-	dnl }}}
-	dnl plapack{{{
-	AC_MSG_CHECKING(for plapack)
-
-	AC_ARG_WITH([plapack-lib],
-	  AS_HELP_STRING([--with-plapack-lib = lib],[plapack library]),
-	  [PLAPACK_LIB=$withval],[PLAPACK_LIB=""])
-
-	AC_ARG_WITH([plapack-include],
-			  AS_HELP_STRING([--with-plapack-include = include],
-							 [plapack include ]),
-			  [PLAPACK_INCLUDE=$withval],[PLAPACK_INCLUDE=""])
-
-	if test -n "$PLAPACK_LIB"; then
-		if test -n "$PLAPACK_INCLUDE"; then
-
+	dnl Math Kernel Library (MKL){{{
+	AC_ARG_WITH(
+		[mkl-libflags],
+		AS_HELP_STRING([--with-mkl-libflags=LIBS], [MKL libraries to be used]),
+		[MKL_LIBFLAGS=${withval}],
+		[MKL_LIBFLAGS="no"]
+	)
+	AC_MSG_CHECKING([for MKL])
+	if test "x${MKL_LIBFLAGS}" == "xno"; then
+		HAVE_MKL=no
+	else
+		HAVE_MKL=yes
+		MKLLIB="${MKL_LIBFLAGS}"
+		AC_DEFINE([_HAVE_MKL_], [1], [with MKL in ISSM src])
+		AC_SUBST([MKLLIB])
+		AC_SUBST([MKLINCL])
+	fi
+	AC_MSG_RESULT([${HAVE_MKL}])
+	dnl }}}
+	dnl PlaLAPACK{{{
+
+	dnl TODO: 	Handle user supplying path to root directory *or* individual
+	dnl 		arguments (like ScaLAPACK)
+	dnl
+	AC_MSG_CHECKING(for PlaLAPACK)
+	AC_ARG_WITH(
+		[plapack-lib],
+		AS_HELP_STRING([--with-plapack-lib=<LIB>], [PlaLAPACK library]),
+		[PLAPACK_LIB=${withval}],
+		[PLAPACK_LIB=""]
+	)
+	AC_ARG_WITH(
+		[plapack-include],
+		AS_HELP_STRING([--with-plapack-include=<INC>], [PlaLAPACK include]),
+		[PLAPACK_INCLUDE=${withval}],
+		[PLAPACK_INCLUDE=""]
+	)
+
+	if test -n "${PLAPACK_LIB}"; then
+		if test -n "${PLAPACK_INCLUDE}"; then
 			HAVE_PLAPACK=yes
-			PLAPACKINCL="$PLAPACK_INCLUDE"
-			PLAPACKLIB="$PLAPACK_LIB"
-			AC_DEFINE([_HAVE_PLAPACK_],[1],[with Plapack in ISSM src])
+			PLAPACKINCL="${PLAPACK_INCLUDE}"
+			PLAPACKLIB="${PLAPACK_LIB}"
+			AC_DEFINE([_HAVE_PLAPACK_], [1], [with PlaLAPACK in ISSM src])
 			AC_SUBST([PLAPACKINCL])
 			AC_SUBST([PLAPACKLIB])
@@ -1552,606 +1696,638 @@
 			HAVE_PLAPACK=no
 		fi
-	else
+	else
 		HAVE_PLAPACK=no
 	fi
-	AC_MSG_RESULT($HAVE_PLAPACK)
-	dnl }}}
-	dnl mumps{{{
-	AC_ARG_WITH([mumps-dir],
-	  AS_HELP_STRING([--with-mumps-dir=DIR],[mumps root directory]),
-	  [MUMPS_ROOT=$withval],[MUMPS_ROOT="no"])
-
-	dnl Check whether mumps is enabled
-	AC_MSG_CHECKING([for mumps])
-	if test "x$MUMPS_ROOT" = "xno" ; then
+	AC_MSG_RESULT([${HAVE_PLAPACK}])
+	dnl }}}
+	dnl MUMPS{{{
+	AC_ARG_WITH(
+		[mumps-dir],
+		AS_HELP_STRING([--with-mumps-dir=DIR], [MUMPS root directory]),
+		[MUMPS_ROOT=${withval}],
+		[MUMPS_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for MUMPS])
+	if test "x${MUMPS_ROOT}" == "xno"; then
 		HAVE_MUMPS=no
 	else
 		HAVE_MUMPS=yes
-		if ! test -d "$MUMPS_ROOT"; then
-			AC_MSG_ERROR([mumps directory provided ($MUMPS_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_MUMPS)
-
-	dnl mumps headers and libraries
-	if test "x$HAVE_MUMPS" == "xyes"; then
-		MUMPSINCL=-I"$MUMPS_ROOT/include"
-		if test "$PETSC_MAJOR" = "2" ; then
-			MUMPSLIB=-L"$MUMPS_ROOT/lib "
+		if ! test -d "${MUMPS_ROOT}"; then
+			AC_MSG_ERROR([MUMPS directory provided (${MUMPS_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_MUMPS}])
+
+	dnl MUMPS libraries and header files
+	if test "x${HAVE_MUMPS}" == "xyes"; then
+		MUMPSINCL="-I${MUMPS_ROOT}/include"
+		if test "x${MUMPS_ROOT}" == "x${PETSC_ROOT}"; then
+			if test "x${PETSC_MAJOR}" == "x2"; then
+				MUMPSLIB="-L${MUMPS_ROOT}/lib "
+			else
+				MUMPSLIB="-L${MUMPS_ROOT}/lib -ldmumps -lcmumps -lmumps_common -lpord -lparmetis -lzmumps -lmetis"
+			fi
 		else
-			MUMPSLIB=-L"$MUMPS_ROOT/lib -ldmumps -lcmumps  -lmumps_common -lpord -lparmetis -lzmumps -lmetis"
-			dnl MUMPSLIB=-L"$MUMPS_ROOT/lib "
-		fi
-		AC_DEFINE([_HAVE_MUMPS_],[1],[with Mumps in ISSM src])
+			MUMPSLIB="-L${MUMPS_ROOT}/lib -ldmumps -lmumps_common -lpord"
+		fi
+		AC_DEFINE([_HAVE_MUMPS_], [1], [with MUMPS in ISSM src])
 		AC_SUBST([MUMPSINCL])
 		AC_SUBST([MUMPSLIB])
 	fi
-	AM_CONDITIONAL([MUMPS], [test x$HAVE_MUMPS = xyes])
-	dnl }}}
-	dnl mumps2{{{
-	if test "x$HAVE_MUMPS" != "xyes"; then
-	AC_MSG_CHECKING(for mumps2 (stand alone))
-
-	AC_ARG_WITH([mumps2-include],
-	  AS_HELP_STRING([--with-mumps2-include=DIR],[mumps2 include directory, necessary for parallel build]),
-	  [MUMPS_INCLUDE=$withval],[MUMPS_INCLUDE=""])
-
-	AC_ARG_WITH([mumps2-libflags],
-	  AS_HELP_STRING([--with-mumps2-libflags=LIBS],[mumps2 libraries to be used, necessary for parallel build]),
-	  [MUMPS_LIBFLAGS=$withval],[MUMPS_LIBFLAGS=""])
-
-
-	if test -z "$MUMPS_INCLUDE" ; then
-		HAVE_MUMPS=no
-	else
-		HAVE_MUMPS=yes
-
-		if test -z "$MUMPS_LIBDIR"; then
-			MUMPSINCL=-I"$MUMPS_INCLUDE"
-			MUMPSLIB="$MUMPS_LIBFLAGS"
+	AM_CONDITIONAL([MUMPS], [test "x${HAVE_MUMPS}" == "xyes"])
+	dnl }}}
+	dnl MUMPS2{{{
+	if test "x${HAVE_MUMPS}" != "xyes"; then
+		AC_MSG_CHECKING(for MUMPS2 (standalone))
+		AC_ARG_WITH(
+			[mumps2-include],
+			AS_HELP_STRING([--with-mumps2-include=DIR], [MUMPS2 include directory, necessary for parallel build]),
+			[MUMPS_INCLUDE=${withval}],
+			[MUMPS_INCLUDE=""]
+		)
+		AC_ARG_WITH(
+			[mumps2-libflags],
+			AS_HELP_STRING([--with-mumps2-libflags=LIBS], [MUMPS2 libraries to be used, necessary for parallel build]),
+			[MUMPS_LIBFLAGS=${withval}],
+			[MUMPS_LIBFLAGS=""]
+		)
+		if test -z "${MUMPS_INCLUDE}"; then
+			HAVE_MUMPS=no
 		else
-			MUMPSINCL=-I"$MUMPS_INCLUDE"
-			MUMPSLIB="-L$MUMPS_LIBDIR $MUMPS_LIBFLAGS"
-		fi
-		AC_DEFINE([_HAVE_MUMPS_],[1],[with MUMPS])
-		AC_SUBST([MUMPSINCL])
-		AC_SUBST([MUMPSLIB])
-	fi
-	AM_CONDITIONAL([MUMPS], [test x$HAVE_MUMPS = xyes])
-	AC_MSG_RESULT($HAVE_MUMPS)
-
-	fi
-	dnl }}}
-	dnl blacs{{{
-	AC_ARG_WITH([blacs-dir],
-		AS_HELP_STRING([--with-blacs-dir=DIR],[blacs root directory]),
-			  [BLACS_ROOT=$withval],[BLACS_ROOT="no"])
-
-	dnl Check whether blacs is enabled
-	AC_MSG_CHECKING([for blacs])
-	if test "x$BLACS_ROOT" = "xno" ; then
+			HAVE_MUMPS=yes
+
+			if test -z "${MUMPS_LIBDIR}"; then
+				MUMPSINCL="-I${MUMPS_INCLUDE}"
+				MUMPSLIB="${MUMPS_LIBFLAGS}"
+			else
+				MUMPSINCL="-I${MUMPS_INCLUDE}"
+				MUMPSLIB="-L${MUMPS_LIBDIR} ${MUMPS_LIBFLAGS}"
+			fi
+			AC_DEFINE([_HAVE_MUMPS_], [1], [with MUMPS])
+			AC_SUBST([MUMPSINCL])
+			AC_SUBST([MUMPSLIB])
+		fi
+		AM_CONDITIONAL([MUMPS], [test "x${HAVE_MUMPS}" == "xyes"])
+		AC_MSG_RESULT([${HAVE_MUMPS}])
+	fi
+	dnl }}}
+	dnl BLACS{{{
+	AC_ARG_WITH(
+		[blacs-dir],
+		AS_HELP_STRING([--with-blacs-dir=DIR], [BLACS root directory]),
+		[BLACS_ROOT=${withval}],
+		[BLACS_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for BLACS])
+	if test "x${BLACS_ROOT}" == "xno"; then
 		HAVE_BLACS=no
 	else
 		HAVE_BLACS=yes
-		if ! test -d "$BLACS_ROOT"; then
-			AC_MSG_ERROR([blacs directory provided ($BLACS_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_BLACS)
-
-	dnl blacs headers and libraries
-	if test "x$HAVE_BLACS" == "xyes"; then
+		if ! test -d "${BLACS_ROOT}"; then
+			AC_MSG_ERROR([BLACS directory provided (${BLACS_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_BLACS}])
+
+	dnl BLACS libraries and header files
+	if test "x${HAVE_BLACS}" == "xyes"; then
 		BLACSINCL=""
-		BLACSLIB=-L"$BLACS_ROOT/ -lblacs"
-		AC_DEFINE([_HAVE_BLACS_],[1],[with Blacs in ISSM src])
+		BLACSLIB="-L${BLACS_ROOT} -lblacs"
+		AC_DEFINE([_HAVE_BLACS_], [1], [with BLACS in ISSM src])
 		AC_SUBST([BLACSINCL])
 		AC_SUBST([BLACSLIB])
 	fi
 	dnl }}}
-	dnl hypre{{{
-	AC_ARG_WITH([hypre-dir],
-	  AS_HELP_STRING([--with-hypre-dir=DIR],[hypre root directory]),
-			  [HYPRE_ROOT=$withval],[HYPRE_ROOT="no"])
-
-	dnl Check whether hypre is enabled
-	AC_MSG_CHECKING([for hypre])
-	if test "x$HYPRE_ROOT" = "xno" ; then
+	dnl HYPRE{{{
+	AC_ARG_WITH(
+		[hypre-dir],
+		AS_HELP_STRING([--with-hypre-dir=DIR], [HYPRE root directory]),
+		[HYPRE_ROOT=${withval}],
+		[HYPRE_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for HYPRE])
+	if test "x${HYPRE_ROOT}" == "xno"; then
 		HAVE_HYPRE=no
 	else
 		HAVE_HYPRE=yes
-		if ! test -d "$HYPRE_ROOT"; then
-			AC_MSG_ERROR([hypre directory provided ($HYPRE_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_HYPRE)
-
-	dnl hypre headers and libraries
-	if test "x$HAVE_HYPRE" == "xyes"; then
+		if ! test -d "${HYPRE_ROOT}"; then
+			AC_MSG_ERROR([HYPRE directory provided (${HYPRE_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_HYPRE}])
+
+	dnl HYPRE libraries and header files
+	if test "x${HAVE_HYPRE}" == "xyes"; then
 		HYPREINCL=""
-		HYPRELIB=-L"$HYPRE_ROOT/lib -lHYPRE"
-		AC_DEFINE([_HAVE_HYPRE_],[1],[with Hypre in ISSM src])
+		HYPRELIB="-L${HYPRE_ROOT}/lib -lHYPRE"
+		AC_DEFINE([_HAVE_HYPRE_], [1], [with HYPRE in ISSM src])
 		AC_SUBST([HYPREINCL])
 		AC_SUBST([HYPRELIB])
 	fi
 	dnl }}}
-	dnl prometheus{{{
-	AC_ARG_WITH([prometheus-dir],
-				AS_HELP_STRING([--with-prometheus-dir=DIR],[prometheus root directory]),
-				[PROMETHEUS_ROOT=$withval],[PROMETHEUS_ROOT="no"])
-
-		dnl Check whether prometheus is enabled
-		AC_MSG_CHECKING([for prometheus])
-		if test "x$PROMETHEUS_ROOT" = "xno" ; then
-			HAVE_PROMETHEUS=no
-		else
-			HAVE_PROMETHEUS=yes
-			if ! test -d "$PROMETHEUS_ROOT"; then
-				AC_MSG_ERROR([prometheus directory provided ($PROMETHEUS_ROOT) does not exist]);
-			fi
-		fi
-		AC_MSG_RESULT($HAVE_PROMETHEUS)
-
-		dnl prometheus headers and libraries
-		if test "x$HAVE_PROMETHEUS" == "xyes"; then
-			 PROMETHEUSINCL=-I"$PROMETHEUS_ROOT/include"
-			 PROMETHEUSLIB=-L"$PROMETHEUS_ROOT/lib -lpromfei -lprometheus -lparmetis"
-			 AC_DEFINE([_HAVE_PROMETHEUS_],[1],[with Prometheus in ISSM src])
-			 AC_SUBST([PROMETHEUSINCL])
-			 AC_SUBST([PROMETHEUSLIB])
-	   fi
-		dnl }}}
-	dnl semic{{{
-	AC_ARG_WITH([semic-dir],
-		AS_HELP_STRING([--with-semic-dir=DIR], [Semic root directory.]),
-		[SEMIC_ROOT=$withval],[SEMIC_ROOT="no"])
-
-	dnl Check whether semic is enabled
-	AC_MSG_CHECKING([for semic])
-	if test "x$SEMIC_ROOT" = "xno" ; then
+	dnl Prometheus{{{
+	AC_ARG_WITH(
+		[prometheus-dir],
+		AS_HELP_STRING([--with-prometheus-dir=DIR], [Prometheus root directory]),
+		[PROMETHEUS_ROOT=${withval}],
+		[PROMETHEUS_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for Prometheus])
+	if test "x${PROMETHEUS_ROOT}" == "xno"; then
+		HAVE_PROMETHEUS=no
+	else
+		HAVE_PROMETHEUS=yes
+		if ! test -d "${PROMETHEUS_ROOT}"; then
+			AC_MSG_ERROR([Prometheus directory provided (${PROMETHEUS_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_PROMETHEUS}])
+
+	dnl Prometheus libraries and header files
+	if test "x${HAVE_PROMETHEUS}" == "xyes"; then
+		PROMETHEUSINCL="-I${PROMETHEUS_ROOT}/include"
+		PROMETHEUSLIB="-L${PROMETHEUS_ROOT}/lib -lpromfei -lprometheus -lparmetis"
+		AC_DEFINE([_HAVE_PROMETHEUS_], [1], [with Prometheus in ISSM src])
+		AC_SUBST([PROMETHEUSINCL])
+		AC_SUBST([PROMETHEUSLIB])
+	fi
+	dnl }}}
+	dnl SEMIC{{{
+	AC_ARG_WITH(
+		[semic-dir],
+		AS_HELP_STRING([--with-semic-dir=DIR], [SEMIC root directory]),
+		[SEMIC_ROOT=${withval}],
+		[SEMIC_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for SEMIC])
+	if test "x${SEMIC_ROOT}" == "xno"; then
 		HAVE_SEMIC=no
 	else
 		HAVE_SEMIC=yes
-		if ! test -d "$SEMIC_ROOT"; then
-			AC_MSG_ERROR([semic directory provided ($SEMIC_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_SEMIC)
-
-	dnl semic headers and libraries
-	if test "x$HAVE_SEMIC" == "xyes"; then
-		SEMICINCL="-I$SEMIC_ROOT/"
-		AC_DEFINE([_HAVE_SEMIC_],[1],[with semic in ISSM src])
-		SEMICLIB="$SEMIC_ROOT/libsurface_physics.a $SEMIC_ROOT/libutils.a"
+		if ! test -d "${SEMIC_ROOT}"; then
+			AC_MSG_ERROR([SEMIC directory provided (${SEMIC_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_SEMIC}])
+
+	dnl SEMIC libraries and header files
+	if test "x${HAVE_SEMIC}" == "xyes"; then
+		SEMICINCL="-I${SEMIC_ROOT}"
+		AC_DEFINE([_HAVE_SEMIC_], [1], [with SEMIC in ISSM src])
+		SEMICLIB="-L${SEMIC_ROOT} -lsurface_physics -lutils"
 		AC_SUBST([SEMICLIB])
 		AC_SUBST([SEMICINCL])
 	fi
-	AM_CONDITIONAL([SEMIC],[test x$HAVE_SEMIC = xyes])
-	dnl }}}
-dnl spai{{{
-	AC_ARG_WITH([spai-dir],
-				AS_HELP_STRING([--with-spai-dir=DIR],[spai root directory]),
-				[SPAI_ROOT=$withval],[SPAI_ROOT="no"])
-
-		dnl Check whether spai is enabled
-		AC_MSG_CHECKING([for spai])
-		if test "x$SPAI_ROOT" = "xno" ; then
-			HAVE_SPAI=no
-		else
-			HAVE_SPAI=yes
-			if ! test -d "$SPAI_ROOT"; then
-				AC_MSG_ERROR([spai directory provided ($SPAI_ROOT) does not exist]);
-			fi
-		fi
-		AC_MSG_RESULT($HAVE_SPAI)
-
-		dnl spai headers and libraries
-		if test "x$HAVE_SPAI" == "xyes"; then
-			SPAIINCL=-I"$SPAI_ROOT/include"
-			SPAILIB=-L"$SPAI_ROOT/lib -lspai"
-			AC_DEFINE([_HAVE_SPAI_],[1],[with Spai in ISSM src])
-			AC_SUBST([SPAIINCL])
-			AC_SUBST([SPAILIB])
-		fi
-	  dnl }}}
-dnl superlu{{{
-	AC_ARG_WITH([superlu-dir],
-				AS_HELP_STRING([--with-superlu-dir=DIR],[superlu root directory]),
-				[SUPERLU_ROOT=$withval],[SUPERLU_ROOT="no"])
-
-	dnl Check whether superlu is enabled
-	AC_MSG_CHECKING([for superlu])
-	if test "x$SUPERLU_ROOT" = "xno" ; then
+	AM_CONDITIONAL([SEMIC], [test "x${HAVE_SEMIC}" == "xyes"])
+	dnl }}}
+	dnl SPAI{{{
+	AC_ARG_WITH(
+		[spai-dir],
+		AS_HELP_STRING([--with-spai-dir=DIR], [SPAI root directory]),
+		[SPAI_ROOT=${withval}],
+		[SPAI_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for SPAI])
+	if test "x${SPAI_ROOT}" == "xno"; then
+		HAVE_SPAI=no
+	else
+		HAVE_SPAI=yes
+		if ! test -d "${SPAI_ROOT}"; then
+			AC_MSG_ERROR([SPAI directory provided (${SPAI_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_SPAI}])
+
+	dnl SPAI libraries and header files
+	if test "x${HAVE_SPAI}" == "xyes"; then
+		SPAIINCL="-I${SPAI_ROOT}/include"
+		SPAILIB="-L${SPAI_ROOT}/lib -lspai"
+		AC_DEFINE([_HAVE_SPAI_], [1], [with SPAI in ISSM src])
+		AC_SUBST([SPAIINCL])
+		AC_SUBST([SPAILIB])
+	fi
+	dnl }}}
+	dnl SuperLU{{{
+	AC_ARG_WITH(
+		[superlu-dir],
+		AS_HELP_STRING([--with-superlu-dir=DIR], [SuperLU root directory]),
+		[SUPERLU_ROOT=${withval}],
+		[SUPERLU_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for SuperLU])
+	if test "x${SUPERLU_ROOT}" == "xno"; then
 		HAVE_SUPERLU=no
 	else
 		HAVE_SUPERLU=yes
-		if ! test -d "$SUPERLU_ROOT"; then
-			AC_MSG_ERROR([superlu directory provided ($SUPERLU_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_SUPERLU)
-
-	dnl superlu headers and libraries
-	if test "x$HAVE_SUPERLU" == "xyes"; then
-		  SUPERLUINCL=-I"$SUPERLU_ROOT/include"
-		  SUPERLULIB=-L"$SUPERLU_ROOT/lib -lsuperlu_dist"
-		  AC_DEFINE([_HAVE_SUPERLU_],[1],[with Superlu in ISSM src])
-		  AC_SUBST([SUPERLUINCL])
-		  AC_SUBST([SUPERLULIB])
-	 fi
-	 dnl }}}
-dnl spooles{{{
-	AC_ARG_WITH([spooles-dir],
-				AS_HELP_STRING([--with-spooles-dir=DIR],[spooles root directory]),
-				[SPOOLES_ROOT=$withval],[SPOOLES_ROOT="no"])
-
-	dnl Check whether spooles is enabled
-	AC_MSG_CHECKING([for spooles])
-	if test "x$SPOOLES_ROOT" = "xno" ; then
+		if ! test -d "${SUPERLU_ROOT}"; then
+			AC_MSG_ERROR([SuperLU directory provided (${SUPERLU_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_SUPERLU}])
+
+	dnl SuperLU libraries and header files
+	if test "x${HAVE_SUPERLU}" == "xyes"; then
+		SUPERLUINCL="-I${SUPERLU_ROOT}/include"
+		SUPERLULIB="-L${SUPERLU_ROOT}/lib -lsuperlu_dist"
+		AC_DEFINE([_HAVE_SUPERLU_], [1], [with SuperLU in ISSM src])
+		AC_SUBST([SUPERLUINCL])
+		AC_SUBST([SUPERLULIB])
+	fi
+	dnl }}}
+	dnl SPOOLES{{{
+	AC_ARG_WITH(
+		[spooles-dir],
+		AS_HELP_STRING([--with-spooles-dir=DIR], [SPOOLES root directory]),
+		[SPOOLES_ROOT=${withval}],
+		[SPOOLES_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for SPOOLES])
+	if test "x${SPOOLES_ROOT}" == "xno"; then
 		HAVE_SPOOLES=no
 	else
 		HAVE_SPOOLES=yes
-		if ! test -d "$SPOOLES_ROOT"; then
-			AC_MSG_ERROR([spooles directory provided ($SPOOLES_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_SPOOLES)
-
-	dnl spooles headers and libraries
-	if test "x$HAVE_SPOOLES" == "xyes"; then
-		  SPOOLESINCL=-I"$SPOOLES_ROOT/include"
-		  SPOOLESLIB=-L"$SPOOLES_ROOT/lib -lspooles"
-		  AC_DEFINE([_HAVE_SPOOLES_],[1],[with Spooles in ISSM src])
-		  AC_SUBST([SPOOLESINCL])
-		  AC_SUBST([SPOOLESLIB])
-	 fi
-	 dnl }}}
-dnl pastix{{{
-	AC_ARG_WITH([pastix-dir],
-				AS_HELP_STRING([--with-pastix-dir=DIR],[pastix root directory]),
-				[PASTIX_ROOT=$withval],[PASTIX_ROOT="no"])
-
-	dnl Check whether pastix is enabled
-	AC_MSG_CHECKING([for pastix])
-	if test "x$PASTIX_ROOT" = "xno" ; then
+		if ! test -d "${SPOOLES_ROOT}"; then
+			AC_MSG_ERROR([SPOOLES directory provided (${SPOOLES_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_SPOOLES}])
+
+	dnl SPOOLES libraries and header files
+	if test "x${HAVE_SPOOLES}" == "xyes"; then
+		SPOOLESINCL="-I${SPOOLES_ROOT}/include"
+		SPOOLESLIB="-L${SPOOLES_ROOT}/lib -lspooles"
+		AC_DEFINE([_HAVE_SPOOLES_], [1], [with SPOOLES in ISSM src])
+		AC_SUBST([SPOOLESINCL])
+		AC_SUBST([SPOOLESLIB])
+	fi
+	dnl }}}
+	dnl PaStiX{{{
+	AC_ARG_WITH(
+		[pastix-dir],
+		AS_HELP_STRING([--with-pastix-dir=DIR], [PaStiX root directory]),
+		[PASTIX_ROOT=${withval}],
+		[PASTIX_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for PaStiX])
+	if test "x${PASTIX_ROOT}" == "xno"; then
 		HAVE_PASTIX=no
 	else
 		HAVE_PASTIX=yes
-		if ! test -d "$PASTIX_ROOT"; then
-			AC_MSG_ERROR([pastix directory provided ($PASTIX_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_PASTIX)
-
-	dnl pastix headers and libraries
-	if test "x$HAVE_PASTIX" == "xyes"; then
-		  PASTIXINCL=-I"$PASTIX_ROOT/include"
-		  PASTIXLIB=-L"$PASTIX_ROOT/lib -lpastix_XXbit_mpi_smp_nobubble_int32_simple_real_scotch_i686_pc_linux -lptscotch -lptscotcherr -lpastix"
-		  AC_DEFINE([_HAVE_PASTIX_],[1],[with Pastix in ISSM src])
-		  AC_SUBST([PASTIXINCL])
-		  AC_SUBST([PASTIXLIB])
-  fi
-  dnl }}}
+		if ! test -d "${PASTIX_ROOT}"; then
+			AC_MSG_ERROR([PaStiX directory provided (${PASTIX_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_PASTIX}])
+
+	dnl PaStiX libraries and header files
+	if test "x${HAVE_PASTIX}" == "xyes"; then
+		PASTIXINCL="-I${PASTIX_ROOT}/include"
+		PASTIXLIB="-L${PASTIX_ROOT}/lib -lpastix_XXbit_mpi_smp_nobubble_int32_simple_real_scotch_i686_pc_linux -lptscotch -lptscotcherr -lpastix"
+		AC_DEFINE([_HAVE_PASTIX_], [1], [with PaStiX in ISSM src])
+		AC_SUBST([PASTIXINCL])
+		AC_SUBST([PASTIXLIB])
+	fi
+	dnl }}}
+	dnl }}}
 	dnl ml{{{
-	AC_ARG_WITH([ml-dir],
-	  AS_HELP_STRING([--with-ml-dir=DIR],[ml root directory]),
-			  [ML_ROOT=$withval],[ML_ROOT="no"])
-
-	dnl Check whether ml is enabled
+	AC_ARG_WITH(
+		[ml-dir],
+		AS_HELP_STRING([--with-ml-dir=DIR],[ml root directory]),
+		[ML_ROOT=$withval],
+		[ML_ROOT="no"]
+	)
 	AC_MSG_CHECKING([for ml])
-	if test "x$ML_ROOT" = "xno" ; then
+	if test "x${ML_ROOT}" == "xno"; then
 		HAVE_ML=no
 	else
 		HAVE_ML=yes
-		if ! test -d "$ML_ROOT"; then
-			AC_MSG_ERROR([ml directory provided ($ML_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_ML)
-
-	dnl ml headers and libraries
-	if test "x$HAVE_ML" == "xyes"; then
-		MLINCL=-I"$ML_ROOT/include"
-		MLLIB=-L"$ML_ROOT/lib -lml"
-		AC_DEFINE([_HAVE_ML_],[1],[with Blacs in ISSM src])
+		if ! test -d "${ML_ROOT}"; then
+			AC_MSG_ERROR([ml directory provided (${ML_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_ML}])
+
+	dnl ml libraries and header files
+	if test "x${HAVE_ML}" == "xyes"; then
+		MLINCL=-I"${ML_ROOT}/include"
+		MLLIB=-L"${ML_ROOT}/lib -lml"
+		AC_DEFINE([_HAVE_ML_], [1], [with ml in ISSM src])
 		AC_SUBST([MLINCL])
 		AC_SUBST([MLLIB])
 	fi
 	dnl }}}
-	dnl umfpack{{{
-		AC_ARG_WITH([umfpack-dir],
-		  AS_HELP_STRING([--with-umfpack-dir=DIR],[UMFPACK root directory]),
-					[UMFPACK_ROOT=$withval],[UMFPACK_ROOT="no"])
-
-	dnl Check whether umfpack is enabled
-	AC_MSG_CHECKING([for umfpack])
-	if test "x$UMFPACK_ROOT" = "xno" ; then
+	dnl UMFPACK{{{
+	AC_ARG_WITH(
+		[umfpack-dir],
+		AS_HELP_STRING([--with-umfpack-dir=DIR], [UMFPACK root directory]),
+		[UMFPACK_ROOT=${withval}],
+		[UMFPACK_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for UMFPACK])
+	if test "x${UMFPACK_ROOT}" == "xno"; then
 		HAVE_UMFPACK=no
 	else
 		HAVE_UMFPACK=yes
-		if ! test -d "$UMFPACK_ROOT"; then
-			AC_MSG_ERROR([umfpack directory provided ($UMFPACK_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_UMFPACK)
-
-	dnl umfpack headers and libraries
-	if test "x$HAVE_UMFPACK" == "xyes"; then
+		if ! test -d "${UMFPACK_ROOT}"; then
+			AC_MSG_ERROR([UMFPACK directory provided (${UMFPACK_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_UMFPACK}])
+
+	dnl UMFPACK libraries and header files
+	if test "x${HAVE_UMFPACK}" == "xyes"; then
 		UMFPACKINCL=""
-		UMFPACKLIB=-L"$UMFPACK_ROOT/lib -lumfpack -lumfpack.5.5.1"
-		AC_DEFINE([_HAVE_UMFPACK_],[1],[with UMFPACK in ISSM src])
+		UMFPACKLIB="-L${UMFPACK_ROOT}/lib -lumfpack -lumfpack.5.5.1"
+		AC_DEFINE([_HAVE_UMFPACK_], [1], [with UMFPACK in ISSM src])
 		AC_SUBST([UMFPACKINCL])
 		AC_SUBST([UMFPACKLIB])
 	fi
 	dnl }}}
-dnl math{{{
-	AC_MSG_CHECKING(for math library)
-	AC_ARG_WITH([math-lib],
-	  AS_HELP_STRING([--with-math-lib = otions],[math options, for ex: "/usr/lib/libm.a]),
-	  [MATH_LIB=$withval],[MATH_LIB=""])
-
-	dnl check that --with-math-lib may have been provided
-	if test -n "$MATH_LIB" ; then
+	dnl libm (GNU math library){{{
+	AC_MSG_CHECKING(for libm)
+	AC_ARG_WITH(
+		[math-lib],
+		AS_HELP_STRING([--with-math-lib=LIB], [libm (GNU math library) to use]),
+		[MATH_LIB=${withval}],
+		[MATH_LIB=""]
+	)
+	if test -n "${MATH_LIB}"; then
 		HAVE_MATH=yes
-		MATHLIB="$MATH_LIB"
-		AC_DEFINE([_HAVE_MATH_],[1],[with MATH in ISSM src])
+		MATHLIB="${MATH_LIB}"
+		AC_DEFINE([_HAVE_MATH_], [1], [with libm (GNU math library) in ISSM src])
 		AC_SUBST([MATHLIB])
 	fi
-	AC_MSG_RESULT(done)
-	dnl }}}
-	dnl math77{{{
-		AC_ARG_WITH([math77-dir],
-					AS_HELP_STRING([--with-math77-dir=DIR], [math77 root directory.]),
-					[MATH77_ROOT=$withval],[MATH77_ROOT="no"])
-
-	dnl Check whether math77 is enabled
-	AC_MSG_CHECKING([for math77])
-	if test "x$MATH77_ROOT" = "xno" ; then
+	AC_MSG_RESULT([done])
+	dnl }}}
+	dnl MATH77{{{
+	AC_ARG_WITH(
+		[math77-dir],
+		AS_HELP_STRING([--with-math77-dir=DIR], [MATH77 root directory]),
+		[MATH77_ROOT=${withval}],
+		[MATH77_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for MATH77])
+	if test "x${MATH77_ROOT}" == "xno"; then
 		HAVE_MATH77=no
 	else
 		HAVE_MATH77=yes
-		if ! test -d "$MATH77_ROOT"; then
-			AC_MSG_ERROR([math77 directory provided ($MATH77_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_MATH77)
-
-	dnl math77 headers and libraries
-	if test "x$HAVE_MATH77" == "xyes"; then
-		MATH77LIB="-L$MATH77_ROOT/ -lmath77"
-		AC_DEFINE([_HAVE_MATH77_],[1],[with math77 in ISSM src])
+		if ! test -d "${MATH77_ROOT}"; then
+			AC_MSG_ERROR([MATH77 directory provided (${MATH77_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_MATH77}])
+
+	dnl MATH77 libraries and header files
+	if test "x${HAVE_MATH77}" == "xyes"; then
+		MATH77LIB="-L${MATH77_ROOT} -lmath77"
+		AC_DEFINE([_HAVE_MATH77_], [1], [with MATH77 in ISSM src])
 		AC_SUBST([MATH77LIB])
-   fi
-	dnl }}}
-	dnl fortran{{{
-	AC_ARG_WITH([fortran],
-		AS_HELP_STRING([--with-fortran = YES], [do we compile fortran code (default is yes)]),
-		[FORTRAN=$withval],[FORTRAN=yes])
-	AC_MSG_CHECKING(for fortran compilation)
-	if test "x$FORTRAN" = "xyes"; then
-		dnl defaults
+	fi
+	dnl }}}
+	dnl Fortran{{{
+	AC_ARG_WITH(
+		[fortran],
+		AS_HELP_STRING([--with-fortran=YES], [do we compile Fortran code (default: yes)]),
+		[FORTRAN=${withval}],
+		[FORTRAN=yes]
+	)
+	AC_MSG_CHECKING(for Fortran compilation)
+	if test "x${FORTRAN}" == "xyes"; then
 		HAVE_FORTRAN=yes
-		AC_DEFINE([_HAVE_FORTRAN_],[1],[with fortran capability])
+		AC_DEFINE([_HAVE_FORTRAN_], [1], [with Fortran capability])
 	else
 		HAVE_FORTRAN=no
 	fi
-	AM_CONDITIONAL([FORTRAN], [test x$FORTRAN = xyes])
-	AC_MSG_RESULT($FORTRAN)
-
-	if test "x$FORTRAN" = "xyes"; then
-		dnl fortran library  option
-		AC_MSG_CHECKING(for fortran library)
-		AC_ARG_WITH([fortran-lib],
-		  AS_HELP_STRING([--with-fortran-lib = options],[fortran options, for ex: "/usr/lib/gfortran.a]),
-			[FORTRAN_LIB=$withval],[FORTRAN_LIB=""])
-
-		dnl check that --with-fortran-lib may have been provided
-		if test -n "$FORTRAN_LIB" ; then
-			dnl check that library provided EXISTS!
-		   FORTRAN_DIR=$(echo $FORTRAN_LIB | sed -e "s/-L//g" | awk '{print $[1]}')
-			if test -d "$FORTRAN_DIR" || test -f "$FORTRAN_DIR"; then
-				FORTRANLIB="$FORTRAN_LIB"
-				AC_DEFINE([_HAVE_FORTRAN_],[1],[with FORTRAN in ISSM src])
+	AM_CONDITIONAL([FORTRAN], [test "x${FORTRAN}" == "xyes"])
+	AC_MSG_RESULT([${FORTRAN}])
+
+	if test "x${FORTRAN}" == "xyes"; then
+		dnl Fortran library
+		AC_MSG_CHECKING([for Fortran library])
+		AC_ARG_WITH(
+			[fortran-lib],
+			AS_HELP_STRING([--with-fortran-lib=LIB], [Fortran library to use (and, if needed, libraries on which it depends)]),
+			[FORTRAN_LIB=${withval}],
+			[FORTRAN_LIB=""]
+		)
+		if test -n "${FORTRAN_LIB}"; then
+			FORTRAN_DIR=$(echo ${FORTRAN_LIB} | sed -e "s/-L//g" | awk '{print $[1]}')
+			if test -d "${FORTRAN_DIR}" || test -f "${FORTRAN_DIR}"; then
+				FORTRANLIB="${FORTRAN_LIB}"
+				AC_DEFINE([_HAVE_FORTRAN_], [1], [with Fortran library in ISSM src])
 				AC_SUBST([FORTRANLIB])
 			else
-			 if test "x$HAVE_MPI" = "xyes"; then
-				FORTRANLIB=$(mpif77 -print-file-name="libgfortran.a")
-				if test -f "$FORTRANLIB"; then
-					 AC_MSG_ERROR([fortran library provided ($FORTRAN_LIB) does not exist, MPI suggests the following library: $FORTRANLIB]);
-				fi
-			 fi
-				AC_MSG_ERROR([fortran library provided ($FORTRAN_LIB) does not exist!]);
+				if test "x${HAVE_MPI}" == "xyes"; then
+					MPI_REC_FORTRAN_LIB=$(mpif77 -print-file-name="libgfortran.a")
+					if test -f "${MPI_REC_FORTRAN_LIB}"; then
+						AC_MSG_ERROR([Fortran library provided (${FORTRAN_LIB}) does not exist! MPI suggests the following library: ${MPI_REC_FORTRAN_LIB}]);
+					fi
+			 	fi
+				AC_MSG_ERROR([Fortran library provided (${FORTRAN_LIB}) does not exist!]);
 			fi
 		fi
-		AC_MSG_RESULT(done)
-	fi
-	dnl }}}
-	dnl graphics{{{
-	AC_MSG_CHECKING(for graphics library)
-	AC_ARG_WITH([graphics-lib],
-	  AS_HELP_STRING([--with-graphics-lib = options],[graphics options, for ex: "/usr/X11/lib/libX11.a]),
-	  [GRAPHICS_LIB=$withval],[GRAPHICS_LIB=""])
-
-	dnl check that --with-graphics-lib may have been provided
-	if test -n "$GRAPHICS_LIB" ; then
-		dnl check that library provided EXISTS!
-		GRAPHICS_DIR=$(echo $GRAPHICS_LIB | sed -e "s/-L//g" | awk '{print $[1]}')
-		if test -d "$GRAPHICS_DIR" || test -f "$GRAPHICS_DIR"; then
+		AC_MSG_RESULT([done])
+	fi
+	dnl }}}
+	dnl Xlib (graphics library){{{
+	AC_MSG_CHECKING([for Xlib (graphics library)])
+	AC_ARG_WITH(
+		[graphics-lib],
+		AS_HELP_STRING([--with-graphics-lib=options], [Xlib (graphics library) to use]),
+		[GRAPHICS_LIB=${withval}],
+		[GRAPHICS_LIB=""]
+	)
+	if test -n "${GRAPHICS_LIB}"; then
+		GRAPHICS_DIR=$(echo ${GRAPHICS_LIB} | sed -e "s/-L//g" | awk '{print $[1]}')
+		if test -d "${GRAPHICS_DIR}" || test -f "${GRAPHICS_DIR}"; then
 			HAVE_GRAPHICS=yes
-			GRAPHICSLIB="$GRAPHICS_LIB"
-			AC_DEFINE([_HAVE_GRAPHICS_],[1],[with GRAPHICS in ISSM src])
+			GRAPHICSLIB="${GRAPHICS_LIB}"
+			AC_DEFINE([_HAVE_GRAPHICS_], [1], [with Xlib (graphics library) in ISSM src])
 			AC_SUBST([GRAPHICSLIB])
 		else
-			if test -f "$PETSC_ROOT/conf/petscvariables"; then
-				GRAPHICSLIB=$(cat $PETSC_ROOT/conf/petscvariables | grep X_LIB)
-				AC_MSG_ERROR([graphics library provided ($GRAPHICS_LIB) does not exist, PETSc suggests the following library: $GRAPHICSLIB]);
+			if test -f "${PETSC_ROOT}/conf/petscvariables"; then
+				PETSC_REC_GRAPHICS_LIB=$(cat ${PETSC_ROOT}/conf/petscvariables | grep X_LIB)
+				AC_MSG_ERROR([Xlib (graphics library) provided (${GRAPHICS_LIB}) does not exist! PETSc suggests the following library: ${PETSC_REC_GRAPHICS_LIB}]);
 			fi
-			AC_MSG_ERROR([graphics library provided ($GRAPHICS_LIB$) does not exist!]);
-		fi
-	fi
-	AC_MSG_RESULT(done)
-	dnl }}}
-	dnl meteoio{{{
-	AC_ARG_WITH([meteoio-dir],
-	  AS_HELP_STRING([--with-meteoio-dir=DIR], [use meteoio in conjunction with snowpack model.]),
-	  [METEOIO_ROOT=$withval],[METEOIO_ROOT="no"])
-
-	dnl Check whether meteoio is enabled
-	AC_MSG_CHECKING([for meteoio])
-	if test "x$METEOIO_ROOT" = "xno" ; then
+			AC_MSG_ERROR([Xlib (graphics library) provided (${GRAPHICS_LIB}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([done])
+	dnl }}}
+	dnl MeteoIO{{{
+	AC_ARG_WITH(
+		[meteoio-dir],
+		AS_HELP_STRING([--with-meteoio-dir=DIR], [use MeteoIO in conjunction with SNOWPACK model]),
+		[METEOIO_ROOT=${withval}],
+		[METEOIO_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for MeteoIO])
+	if test "x${METEOIO_ROOT}" == "xno"; then
 		HAVE_METEOIO=no
 	else
 		HAVE_METEOIO=yes
-		if ! test -d "$METEOIO_ROOT"; then
-			AC_MSG_ERROR([meteoio directory provided ($METEOIO_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_METEOIO)
-
-	dnl meteoio headers and libraries
-	if test "x$HAVE_METEOIO" == "xyes"; then
-		METEOIOINCL="-I$METEOIO_ROOT/include"
-		METEOIOLIB="-dy -L$METEOIO_ROOT/lib  -lmeteoio "
-
-		AC_DEFINE([_HAVE_METEOIO_],[1],[with meteoio])
+		if ! test -d "${METEOIO_ROOT}"; then
+			AC_MSG_ERROR([MeteoIO directory provided (${METEOIO_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_METEOIO}])
+
+	dnl MeteoIO libraries and header files
+	if test "x${HAVE_METEOIO}" == "xyes"; then
+		METEOIOINCL="-I${METEOIO_ROOT}/include"
+		METEOIOLIB="-dy -L${METEOIO_ROOT}/lib -lmeteoio"
+
+		AC_DEFINE([_HAVE_METEOIO_], [1], [with MeteoIO])
 		AC_SUBST([METEOIOINCL])
 		AC_SUBST([METEOIOLIB])
 	fi
-	AM_CONDITIONAL([METEOIO], [test x$HAVE_METEOIO = xyes])
-	dnl }}}
-	dnl snowpack{{{
-	AC_ARG_WITH([snowpack-dir],
-	  AS_HELP_STRING([--with-snowpack-dir=DIR], [use snowpack for surface mass balance model.]),
-	  [SNOWPACK_ROOT=$withval],[SNOWPACK_ROOT="no"])
-
-	dnl Check whether snowpack is enabled
-	AC_MSG_CHECKING([for snowpack])
-	if test "x$SNOWPACK_ROOT" = "xno" ; then
+	AM_CONDITIONAL([METEOIO], [test "x${HAVE_METEOIO}" == "xyes"])
+	dnl }}}
+	dnl SNOWPACK{{{
+	AC_ARG_WITH(
+		[snowpack-dir],
+		AS_HELP_STRING([--with-snowpack-dir=DIR], [use SNOWPACK for surface mass balance model]),
+		[SNOWPACK_ROOT=${withval}],
+		[SNOWPACK_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for SNOWPACK])
+	if test "x${SNOWPACK_ROOT}" == "xno"; then
 		HAVE_SNOWPACK=no
 	else
 		HAVE_SNOWPACK=yes
-		if ! test -d "$SNOWPACK_ROOT"; then
-			AC_MSG_ERROR([snowpack directory provided ($SNOWPACK_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_SNOWPACK)
-
-	dnl snowpack headers and libraries
-	if test "x$HAVE_SNOWPACK" == "xyes"; then
-		SNOWPACKINCL="-I$SNOWPACK_ROOT/include"
-		SNOWPACKLIB="-dy -L$SNOWPACK_ROOT/lib  -lsnowpack "
-
-		AC_DEFINE([_HAVE_SNOWPACK_],[1],[with snowpack for surface mass balance model])
+		if ! test -d "${SNOWPACK_ROOT}"; then
+			AC_MSG_ERROR([SNOWPACK directory provided (${SNOWPACK_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_SNOWPACK}])
+
+	dnl SNOWPACK libraries and header files
+	if test "x${HAVE_SNOWPACK}" == "xyes"; then
+		SNOWPACKINCL="-I${SNOWPACK_ROOT}/include"
+		SNOWPACKLIB="-dy -L${SNOWPACK_ROOT}/lib -lsnowpack"
+		AC_DEFINE([_HAVE_SNOWPACK_], [1], [with SNOWPACK for surface mass balance model])
 		AC_SUBST([SNOWPACKINCL])
 		AC_SUBST([SNOWPACKLIB])
 	fi
-	AM_CONDITIONAL([SNOWPACK], [test x$HAVE_SNOWPACK = xyes])
-	dnl }}}
-	dnl neopz{{{
-	AC_ARG_WITH([neopz-dir],
-		AS_HELP_STRING([--with-neopz-dir=DIR], [neopz root directory.]),
-		[NEOPZ_ROOT=$withval],[NEOPZ_ROOT="no"])
-
-	dnl Check whether neopz is enabled
-	AC_MSG_CHECKING([for neopz])
-	if test "x$NEOPZ_ROOT" = "xno" ; then
+	AM_CONDITIONAL([SNOWPACK], [test "x${HAVE_SNOWPACK}" == "xyes"])
+	dnl }}}
+	dnl NeoPZ{{{
+	AC_ARG_WITH(
+		[neopz-dir],
+		AS_HELP_STRING([--with-neopz-dir=DIR], [NeoPZ root directory]),
+		[NEOPZ_ROOT=${withval}],
+		[NEOPZ_ROOT="no"]
+	)
+	AC_MSG_CHECKING([for NeoPZ])
+	if test "x${NEOPZ_ROOT}" == "xno"; then
 		HAVE_NEOPZ=no
 	else
 		HAVE_NEOPZ=yes
-		if ! test -d "$NEOPZ_ROOT"; then
-			AC_MSG_ERROR([neopz directory provided ($NEOPZ_ROOT) does not exist]);
-		fi
-	fi
-	AC_MSG_RESULT($HAVE_NEOPZ)
-
-	dnl neopz headers and libraries
-	if test "x$HAVE_NEOPZ" == "xyes"; then
-	  NEOPZLIB="$NEOPZ_ROOT/lib/libpz.a"
-     NEOPZINCL=" -I$NEOPZ_ROOT/include"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Analysis"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Common"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/External"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Frontal"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Geom"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Integral"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/LinearSolvers"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Material"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Matrix"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Mesh"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Multigrid"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/PerfUtil"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Post"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Pre"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Refine"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Save"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Shape"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/SpecialMaps"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/StrMatrix"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/SubStruct"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Topology"
-	  NEOPZINCL+=" -I$NEOPZ_ROOT/include/Util"
-	  CXXFLAGS+=" -std=c++11"
-	  AC_DEFINE([_HAVE_NEOPZ_],[1],[with NEOPZ in ISSM src])
-	  AC_SUBST([NEOPZINCL])
-	  AC_SUBST([NEOPZLIB])
-	fi
-	AM_CONDITIONAL([NEOPZ], [test x$HAVE_NEOPZ = xyes])
-	dnl }}}
-
+		if ! test -d "${NEOPZ_ROOT}"; then
+			AC_MSG_ERROR([NeoPZ directory provided (${NEOPZ_ROOT}) does not exist!]);
+		fi
+	fi
+	AC_MSG_RESULT([${HAVE_NEOPZ}])
+
+	dnl NeoPZ libraries and header files
+	if test "x${HAVE_NEOPZ}" == "xyes"; then
+		NEOPZLIB="${NEOPZ_ROOT}/lib/libpz.a"
+		NEOPZINCL="-I${NEOPZ_ROOT}/include"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Analysis"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Common"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/External"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Frontal"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Geom"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Integral"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/LinearSolvers"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Material"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Matrix"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Mesh"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Multigrid"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/PerfUtil"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Post"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Pre"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Refine"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Save"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Shape"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/SpecialMaps"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/StrMatrix"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/SubStruct"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Topology"
+		NEOPZINCL+=" -I${NEOPZ_ROOT}/include/Util"
+		CXXFLAGS+=" -std=c++11"
+		AC_DEFINE([_HAVE_NEOPZ_], [1], [with NeoPZ in ISSM src])
+		AC_SUBST([NEOPZINCL])
+		AC_SUBST([NEOPZLIB])
+	fi
+	AM_CONDITIONAL([NEOPZ], [test "x${HAVE_NEOPZ}" == "xyes"])
+	dnl }}}
 	dnl Capabilities
 	dnl with-bamg{{{
-	AC_ARG_WITH([bamg],
-		AS_HELP_STRING([--with-bamg = YES],[compile with bamg capabilities (default is yes)]),
-		[BAMG=$withval],[BAMG=yes])
-	AC_MSG_CHECKING(for bamg capability compilation)
-
+	AC_ARG_WITH(
+		[bamg],
+		AS_HELP_STRING([--with-bamg=YES], [compile with BAMG capabilities (default: yes)]),
+		[BAMG=${withval}],
+		[BAMG=yes]
+	)
+	AC_MSG_CHECKING([for BAMG capability compilation])
 	HAVE_BAMG=no
-	if test "x$BAMG" = "xyes"; then
+	if test "x${BAMG}" == "xyes"; then
 		HAVE_BAMG=yes
-		AC_DEFINE([_HAVE_BAMG_],[1],[with bamg meshing capability])
-	fi
-	AM_CONDITIONAL([BAMG], [test x$HAVE_BAMG = xyes])
-	AC_MSG_RESULT($HAVE_BAMG)
+		AC_DEFINE([_HAVE_BAMG_], [1], [with BAMG meshing capability])
+	fi
+	AM_CONDITIONAL([BAMG], [test "x${HAVE_BAMG}" == "xyes"])
+	AC_MSG_RESULT([${HAVE_BAMG}])
 	dnl }}}
 	dnl with-ocean{{{
-	AC_ARG_WITH([ocean],
-		AS_HELP_STRING([--with-ocean = YES],[compile with ice/ocean coupling (default is no)]),
-		[OCEAN=$withval],[OCEAN=no])
-	AC_MSG_CHECKING(for ice/ocean capability compilation)
+	AC_ARG_WITH(
+		[ocean],
+		AS_HELP_STRING([--with-ocean=YES], [compile with ice/ocean coupling capability (default: no)]),
+		[OCEAN=${withval}],
+		[OCEAN=no]
+	)
+	AC_MSG_CHECKING(for ice/ocean coupling capability compilation)
 
 	HAVE_OCEAN=no
-	if test "x$OCEAN" = "xyes"; then
+	if test "x${OCEAN}" == "xyes"; then
 		HAVE_OCEAN=yes
-		AC_DEFINE([_HAVE_OCEAN_],[1],[with ice/ocean coupling capability])
-	fi
-	AM_CONDITIONAL([OCEAN], [test x$HAVE_OCEAN = xyes])
-	AC_MSG_RESULT($HAVE_OCEAN)
+		AC_DEFINE([_HAVE_OCEAN_], [1], [with ice/ocean coupling capability])
+	fi
+	AM_CONDITIONAL([OCEAN], [test "x${HAVE_OCEAN}" == "xyes"])
+	AC_MSG_RESULT([${HAVE_OCEAN}])
 	dnl }}}
 	dnl with-kml{{{
-	AC_ARG_WITH([kml],
-		AS_HELP_STRING([--with-kml = YES],[compile with kml capabilities (default is no)]),
-		[KML=$withval],[KML=no])
+	AC_ARG_WITH(
+		[kml],
+		AS_HELP_STRING([--with-kml=YES], [compile with kml capabilities (default: no)]),
+		[KML=${withval}],
+		[KML=no]
+	)
 	AC_MSG_CHECKING(for kml capability compilation)
 
 	HAVE_KML=no
-	if test "x$KML" = "xyes"; then
+	if test "x${KML}" == "xyes"; then
 		HAVE_KML=yes
-		AC_DEFINE([_HAVE_KML_],[1],[with kml capability])
-	fi
-	AM_CONDITIONAL([KML], [test x$HAVE_KML = xyes])
-	AC_MSG_RESULT($HAVE_KML)
+		AC_DEFINE([_HAVE_KML_], [1], [with kml capability])
+	fi
+	AM_CONDITIONAL([KML], [test "x${HAVE_KML}" == "xyes"])
+	AC_MSG_RESULT([${HAVE_KML}])
 	dnl }}}
 	dnl with-kriging{{{
-	AC_ARG_WITH([kriging],
-		AS_HELP_STRING([--with-kriging = YES],[compile with kriging capabilities (default is yes)]),
-		[KRIGING=$withval],[KRIGING=yes])
+	AC_ARG_WITH(
+		[kriging],
+		AS_HELP_STRING([--with-kriging=YES], [compile with kriging capabilities (default: yes)]),
+		[KRIGING=${withval}],
+		[KRIGING=yes]
+	)
 	AC_MSG_CHECKING(for kriging capability compilation)
 
 	HAVE_KRIGING=no
-	if test "x$KRIGING" = "xyes"; then
+	if test "x${KRIGING}" == "xyes"; then
 		HAVE_KRIGING=yes
-		AC_DEFINE([_HAVE_KRIGING_],[1],[with kriging capability])
-	fi
-	AM_CONDITIONAL([KRIGING], [test x$HAVE_KRIGING = xyes])
-	AC_MSG_RESULT($HAVE_KRIGING)
+		AC_DEFINE([_HAVE_KRIGING_], [1], [with kriging capability])
+	fi
+	AM_CONDITIONAL([KRIGING], [test "x${HAVE_KRIGING}" == "xyes"])
+	AC_MSG_RESULT([${HAVE_KRIGING}])
 	dnl }}}
 
@@ -2161,66 +2337,63 @@
 	dnl Platform specifics
 	dnl with-ios{{{
-	AC_ARG_WITH([ios],
-		AS_HELP_STRING([--with-ios = YES], [compile with iOS capabilities (default is no, alternatives are yes)]),
-		[IOS=$withval],[IOS=no])
+	AC_ARG_WITH(
+		[ios],
+		AS_HELP_STRING([--with-ios=YES], [compile with iOS capabilities (default: no)]),
+		[IOS=${withval}],
+		[IOS=no]
+	)
 	AC_MSG_CHECKING(for iOS compilation)
 
-	if test "x$IOS" = "xyes"; then
-		dnl defaults
+	HAVE_IOS=no
+	if test "x${IOS}" == "xyes"; then
 		HAVE_IOS=yes
-
-		AC_DEFINE([_HAVE_IOS_],[1],[with android capability])
-	elif test "x$IOS" = "xno"; then
-		HAVE_IOS=no
-	else
-	  AC_MSG_ERROR([--with-ios should be either no or yes])
-	fi
-	AM_CONDITIONAL([IOS], [test x$HAVE_IOS != xno])
-	AC_MSG_RESULT($HAVE_IOS)
+		AC_DEFINE([_HAVE_IOS_], [1], [with iOS capability])
+	fi
+	AM_CONDITIONAL([IOS], [test "x${HAVE_IOS}" != "xno"])
+	AC_MSG_RESULT([${HAVE_IOS}])
 	dnl }}}
 	dnl with-android{{{
-	AC_ARG_WITH([android],
-		AS_HELP_STRING([--with-android = EXE], [compile with android capabilities (default is no, alternatives are exe and jni)]),
-		[ANDROID=$withval],[ANDROID=no])
-	AC_MSG_CHECKING(for android capability compilation)
-
-	if test "x$ANDROID" = "xjni"; then
-
-		dnl defaults
+	AC_ARG_WITH(
+		[android],
+		AS_HELP_STRING([--with-android=EXE], [compile with Android capabilities (default: "no"; alternatives: "exe", "jni")]),
+		[ANDROID=${withval}],
+		[ANDROID=no])
+	AC_MSG_CHECKING([for Android capability compilation])
+
+	if test "x${ANDROID}" == "xjni"; then
 		HAVE_ANDROID=jni
-		AC_DEFINE([_HAVE_ANDROID_],[1],[with android capability])
-		AC_DEFINE([_HAVE_ANDROID_JNI_],[1],[with android jni])
-	elif test "x$ANDROID" = "xexe"; then
-		dnl defaults
+		AC_DEFINE([_HAVE_ANDROID_], [1], [with Android capability])
+		AC_DEFINE([_HAVE_ANDROID_JNI_], [1], [with Android Java Native Interface (JNI)])
+	elif test "x${ANDROID}" == "xexe"; then
 		HAVE_ANDROID=exe
-
-		AC_DEFINE([_HAVE_ANDROID_],[1],[with android capability])
-	elif test "x$ANDROID" = "xno"; then
+		AC_DEFINE([_HAVE_ANDROID_], [1], [with Android capability])
+	elif test "x${ANDROID}" == "xno"; then
 		HAVE_ANDROID=no
 	else
-	  AC_MSG_ERROR([--with-android should be either no, exe or jni])
-	fi
-	AM_CONDITIONAL([ANDROID], [test x$HAVE_ANDROID != xno])
-	AM_CONDITIONAL([ANDROIDJNI], [test x$HAVE_ANDROID = xjni])
-	AM_CONDITIONAL([ANDROIDEXE], [test x$HAVE_ANDROID = xexe])
-	AC_MSG_RESULT($HAVE_ANDROID)
+		AC_MSG_ERROR([--with-android should be either "no", "exe" or "jni"])
+	fi
+	AM_CONDITIONAL([ANDROID], [test "x${HAVE_ANDROID}" != "xno"])
+	AM_CONDITIONAL([ANDROIDJNI], [test "x${HAVE_ANDROID}" == "xjni"])
+	AM_CONDITIONAL([ANDROIDEXE], [test "x${HAVE_ANDROID}" == "xexe"])
+	AC_MSG_RESULT([${HAVE_ANDROID}])
 	dnl }}}
 	dnl with-android-ndk{{{
-	AC_ARG_WITH([android-ndk],
-	  AS_HELP_STRING([--with-android-ndk=DIR], [android-ndk root directory.]),
-	  [ANDROID_NDK_ROOT=$withval],[ANDROID_NDK_ROOT=""])
-	AC_MSG_CHECKING(with android ndk)
-
-	if test -d "$ANDROID_NDK_ROOT"; then
-		dnl defaults
+	AC_ARG_WITH(
+		[android-ndk],
+		AS_HELP_STRING([--with-android-ndk=DIR], [Android NDK root directory]),
+		[ANDROID_NDK_ROOT=${withval}],
+		[ANDROID_NDK_ROOT=""]
+	)
+	AC_MSG_CHECKING([with Android Native Development Kit (NDK)])
+
+	if test -d "${ANDROID_NDK_ROOT}"; then
 		HAVE_ANDROID_NDK=yes
-		ANDROID_NDKINCL="-I$ANDROID_NDK_ROOT/arm-linux-android-install/sysroot/usr/include"
-
-		AC_DEFINE([_HAVE_ANDROID_NDK_],[1],[with android ndk in ISSM src])
+		ANDROID_NDKINCL="-I${ANDROID_NDK_ROOT}/arm-linux-android-install/sysroot/usr/include"
+		AC_DEFINE([_HAVE_ANDROID_NDK_], [1], [with Android NDK in ISSM src])
 		AC_SUBST([ANDROID_NDKINCL])
 	else
 		HAVE_ANDROID_NDK=no
 	fi
-	AC_MSG_RESULT($HAVE_ANDROID_NDK)
+	AC_MSG_RESULT([${HAVE_ANDROID_NDK}])
 	dnl }}}
 
@@ -2228,115 +2401,121 @@
 	dnl optimization{{{
 	dnl -- bypass standard optimization -g -O2 -fPIC ?
-	AC_ARG_WITH([cxxoptflags],
-	  AS_HELP_STRING([--with-cxxoptflags = CXXOPTFLAGS], [optimization using CXX flags, ex: --with-cxxoptflags=-march=opteron -O3]),
-	  [CXXOPTFLAGS=$withval],[CXXOPTFLAGS="-g -O2 -fPIC"])
-	AC_MSG_CHECKING(for c++ optimization flags)
+	AC_ARG_WITH(
+		[cxxoptflags],
+		AS_HELP_STRING([--with-cxxoptflags=CXXOPTFLAGS], [C++ optimization flags (e.g. --with-cxxoptflags="-march=opteron -O3")]),
+		[CXXOPTFLAGS=${withval}],
+		[CXXOPTFLAGS="-g -O2 -fPIC"]
+	)
+	AC_MSG_CHECKING(for C++ optimization flags)
 	AC_SUBST([CXXOPTFLAGS])
-	AC_MSG_RESULT(done)
+	AC_MSG_RESULT([done])
 	dnl }}}
 	dnl multithreading{{{
-	AC_ARG_WITH([numthreads],
-	  AS_HELP_STRING([--with-numthreads = NUMTHREADS_VALUE],[numthreads, default is 1. ]),
-	  [NUMTHREADS_VALUE=$withval],[NUMTHREADS_VALUE=1])
+	AC_ARG_WITH(
+		[numthreads],
+		AS_HELP_STRING([--with-numthreads=NUMTHREADS_VALUE], [number of threads (default: 1)]),
+		[NUMTHREADS_VALUE=${withval}],
+		[NUMTHREADS_VALUE=1]
+	)
 	AC_MSG_CHECKING(for number of threads)
-	dnl defaults
+	dnl Check that supplied value is an integer
+	if ! [[ "${NUMTHREADS_VALUE}" -eq "${NUMTHREADS_VALUE}" 2> /dev/null ]]; then
+		AC_MSG_ERROR([Number of threads provided (${NUMTHREADS_VALUE}) is not an integer!]);
+	elif test "${NUMTHREADS_VALUE}" == "0"; then
+		AC_MSG_ERROR([Number of threads must be at least 1!]);
+	fi
 	MULTITHREADING=no
 	MULTITHREADINLIB=""
-	if test "$NUMTHREADS_VALUE" != "1"; then
-
+	if test "x${NUMTHREADS_VALUE}" != "x1"; then
 		MULTITHREADINGLIB="-lpthread -lrt"
 		case "${host_os}" in
-		*cygwin*)
-		MULTITHREADINGLIB="-lpthread -lrt"
-		;;
-		*linux*)
-		MULTITHREADINGLIB="-lpthread -lrt"
-		;;
-		*darwin*)
-		MULTITHREADINGLIB="-lpthread"
-		;;
+			*cygwin*)
+				MULTITHREADINGLIB="-lpthread -lrt"
+			;;
+			*linux*)
+				MULTITHREADINGLIB="-lpthread -lrt"
+			;;
+			*darwin*)
+				MULTITHREADINGLIB="-lpthread"
+			;;
 		esac
-		AC_DEFINE([_MULTITHREADING_],[1],[with numthreads enabled])
-	fi
-	dnl check that it is an integer
-	if [[ "$NUMTHREADS_VALUE" -eq   "$NUMTHREADS_VALUE" 2> /dev/null ]] ; then
-	 dnl cool we have an integer !
-	 :
-	else
-	 AC_MSG_ERROR([Number of threads provided ($NUMTHREADS_VALUE) is not an integer]);
-	fi
-	AC_DEFINE_UNQUOTED([_NUMTHREADS_],[$NUMTHREADS_VALUE],[number of threads])
+		AC_DEFINE([_MULTITHREADING_], [1], [with multithreading enabled])
+	fi
+	AC_DEFINE_UNQUOTED([_NUMTHREADS_], [$NUMTHREADS_VALUE], [number of threads])
 	AC_SUBST([MULTITHREADINGLIB])
-	AC_MSG_RESULT($NUMTHREADS_VALUE)
-	dnl }}}
-	dnl 64bit {{{
-	AC_ARG_WITH([64bit-indices],
-	  AS_HELP_STRING([--with-64bit-indices = bool], [use 64 bit integers, default 0, ex: --with-64bit-indices=1]),
-	  [USE_64BIT_INDICES=$withval],[USE_64BIT_INDICES=0])
-	AC_MSG_CHECKING(for 64 bit indices)
-
-	if test "$USE_64BIT_INDICES" == "1"; then
-	AC_DEFINE([ISSM_USE_64BIT_INDICES],[1],[with 64 bits indices])
-	else
-	AC_DEFINE([ISSM_USE_64BIT_INDICES],[0],[with 64 bits indices])
-	fi
-	AC_MSG_RESULT($USE_64BIT_INDICES)
-	dnl }}}
-
-	dnl Checks
-	dnl checks{{{
-		AC_MSG_CHECKING(consistency between all libraries)
-
-		  dnl check that if petsc is requested , mpi should be specified
-		  if test "$HAVE_PETSC" = "yes" ; then
-				if test "$HAVE_MPI" = "NO";  then
-					 AC_MSG_ERROR([petsc requires mpi!]);
-				fi
-		  fi
-
-		  dnl check that we have either python or matlab support if we compile the modules
-		  if test "$MODULES_VALUE" = "yes"  && test "$HAVE_MATLAB" = "no" && test "$HAVE_PYTHON" = "no"; then
-				AC_MSG_ERROR([need at least python or matlab support to compile modules (or use --with-modules=no)]);
-		  fi
-
-       dnl check that fortran is provided if GiaIvins is on
-		 if test "$HAVE_GIAIVINS" = "yes" &&  test "$HAVE_FORTRAN" = "no" ; then
-			  AC_MSG_ERROR([need fortran compiler to compile GiaIvins (or use --without-GiaIvins )]);
-		 fi
-
-       dnl check that fortran is provided if Love is on
-		 if test "$HAVE_LOVE" = "yes" &&  test "$HAVE_FORTRAN" = "no" ; then
-			  AC_MSG_ERROR([need fortran compiler to compile Love (or use --without-Love)]);
-		 fi
-
-		  dnl check that if we have MPI, we have metis
-		  if test "$HAVE_METIS" = "yes"  && test "$HAVE_MPI" = "no" ; then
-				AC_MSG_ERROR([need mpi if using the metis partitioner!]);
-		  fi
-
-		dnl check that if we run adolc, we don't compile krigging.exe
-		if test "$HAVE_ADOLC" = "yes"  && test "$HAVE_KRIGING" = "yes" ; then
-			AC_MSG_ERROR([cannot compile kriging.exe under adolc conditions!]);
-		fi
-		dnl check that if we run adolc, we don't use PETSc for now
-		if test "$HAVE_ADOLC" = "yes"  && test "$HAVE_PETSC" = "yes" ; then
-			AC_MSG_ERROR([cannot compile ISSM with both PETSc and adolc]);
-		fi
-		if test "$HAVE_ADOLC" = "yes"  && test "$HAVE_CODIPACK" = "yes" ; then
-			AC_MSG_ERROR([cannot compile ISSM with both ADOLC and CoDiPack]);
-		fi
-		if test "$HAVE_ADJOINTMPI" = "yes"  && test "$HAVE_MEDIPACK" = "yes" ; then
-			AC_MSG_ERROR([cannot compile ISSM with both MeDiPack and AdjointMPI]);
-		fi
-		dnl check that if we run meteoio, we have snowpack also
-		if test "$HAVE_METEOIO" = "yes"  && test "$HAVE_SNOWPACK" = "no" ; then
-			AC_MSG_ERROR([cannot compile MeteoIO package without Snowpack!]);
-		fi
-		dnl check that if we run snowpack, we have meteoio also
-		if test "$HAVE_METEOIO" = "no"  && test "$HAVE_SNOWPACK" = "yes" ; then
-			AC_MSG_ERROR([cannot compile Snowpack package without MeteoIO!]);
-		fi
-
-		AC_MSG_RESULT(done)
-		dnl }}}
+	AC_MSG_RESULT([${NUMTHREADS_VALUE}])
+	dnl }}}
+	dnl 64-bit indices{{{
+	AC_ARG_WITH(
+		[64bit-indices],
+		AS_HELP_STRING([--with-64bit-indices=bool], [use 64-bit indices (default: 0)]),
+		[USE_64BIT_INDICES=${withval}],
+		[USE_64BIT_INDICES=0]
+	)
+	AC_MSG_CHECKING([for 64-bit indices])
+
+	if test "x${USE_64BIT_INDICES}" == "x1"; then
+		AC_DEFINE([ISSM_USE_64BIT_INDICES], [1], [with 64-bit indices])
+	else
+		AC_DEFINE([ISSM_USE_64BIT_INDICES], [0], [with 64-bit indices])
+	fi
+	AC_MSG_RESULT([${USE_64BIT_INDICES}])
+	dnl }}}
+
+	dnl Checks {{{
+	AC_MSG_CHECKING(consistency between all libraries)
+
+	dnl Check that if PETSc is requested, MPI is specified
+	if test "x${HAVE_PETSC}" == "xyes"; then
+		if test "x${HAVE_MPI}" == "xno"; then
+			AC_MSG_ERROR([PETSc requires MPI!]);
+		fi
+	fi
+
+	dnl Check that we have MATLAB and/or Python support if we compile the modules
+	if test "x${MODULES_VALUE}" == "xyes" && test "x${HAVE_MATLAB}" == "xno" && test "x${HAVE_PYTHON}" == "xno"; then
+		AC_MSG_ERROR([need at least MATLAB and/or Python support to compile modules! (or use --with-modules=no)]);
+	fi
+
+	dnl Check that Fortran is provided if GiaIvins is on
+	if test "x${HAVE_GIAIVINS}" == "xyes" && test "x${HAVE_FORTRAN}" == "xno"; then
+		AC_MSG_ERROR([need Fortran compiler to compile GiaIvins! (or use --without-GiaIvins )]);
+	fi
+
+	dnl Check that Fortran is provided if Love is on
+	if test "x${HAVE_LOVE}" == "xyes" && test "x${HAVE_FORTRAN}" == "xno"; then
+		AC_MSG_ERROR([need Fortran compiler to compile Love! (or use --without-Love)]);
+	fi
+
+	dnl Check that if we have MPI, we have METIS
+	if test "x${HAVE_METIS}" == "xyes" && test "x${HAVE_MPI}" == "xno"; then
+		AC_MSG_ERROR([need MPI if using the METIS partitioner!]);
+	fi
+
+	dnl Check that if we run ADOL-C, we don't compile kriging.exe
+	if test "x${HAVE_ADOLC}" == "xyes" && test "x${HAVE_KRIGING}" == "xyes"; then
+		AC_MSG_ERROR([cannot compile kriging.exe under ADOL-C conditions!]);
+	fi
+
+	dnl Check that if we run ADOL-C, we don't use PETSc for now
+	if test "x${HAVE_ADOLC}" == "xyes" && test "x${HAVE_PETSC}" == "xyes"; then
+		AC_MSG_ERROR([cannot compile ISSM with both PETSc and ADOL-C!]);
+	fi
+	if test "x${HAVE_ADOLC}" == "xyes" && test "x${HAVE_CODIPACK}" == "xyes"; then
+		AC_MSG_ERROR([cannot compile ISSM with both ADOL-C and CoDiPack!]);
+	fi
+	if test "x${HAVE_ADJOINTMPI}" == "xyes" && test "x${HAVE_MEDIPACK}" == "xyes"; then
+		AC_MSG_ERROR([cannot compile ISSM with both MeDiPack and AdjointMPI!]);
+	fi
+	dnl Check that if we run MeteoIO, we have SNOWPACK also
+	if test "x${HAVE_METEOIO}" == "xyes" && test "x${HAVE_SNOWPACK}" == "xno"; then
+		AC_MSG_ERROR([cannot compile MeteoIO package without SNOWPACK!]);
+	fi
+	dnl Check that if we run SNOWPACK, we have MeteoIO also
+	if test "x${HAVE_METEOIO}" == "xno" && test "x${HAVE_SNOWPACK}" == "xyes"; then
+		AC_MSG_ERROR([cannot compile SNOWPACK package without MeteoIO!]);
+	fi
+
+	AC_MSG_RESULT([done])
+	dnl }}}
 ])
Index: /issm/trunk/packagers/linux/package-issm-linux-binaries-with_dakota.sh
===================================================================
--- /issm/trunk/packagers/linux/package-issm-linux-binaries-with_dakota.sh	(revision 24686)
+++ /issm/trunk/packagers/linux/package-issm-linux-binaries-with_dakota.sh	(revision 24686)
@@ -0,0 +1,162 @@
+#!/bin/bash
+
+
+## Constants
+#
+LIBGFORTRAN="/usr/lib/x86_64-linux-gnu/libgfortran.so.5.0.0" # Important that this is the library itself
+LIBGFORTRAN_DIST="${ISSM_DIR}/lib/libgfortran.so.5" # Important the file name matches the SONAME entry in the binaries and other shared libraries which link to it
+MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234,243,244,250,417,444,445,701,702]" # Exclude any tests with transient solutions that require a restart
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+PACKAGE="ISSM" # Name of directory to copy distributable files to
+
+# Exclude any tests with transient solutions that require a restart
+#
+# NOTE:
+# - All non-excluded tests were running until recent changes to QMU
+# - 418 fails with "malloc(): invalid next size (unsorted)""
+#
+PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 243 244 250 417 418 435 444 445 701 702"
+TARBALL_NAME="issm-linux-with_dakota"
+TARBALL="${TARBALL_NAME}.tar.gz"
+
+# Clean up from previous packaging
+echo "Cleaning up existing assets"
+cd ${ISSM_DIR}
+rm -rf ${PACKAGE}
+mkdir ${PACKAGE}
+
+# Add/modify required binaries
+cd ${ISSM_DIR}/bin
+
+echo "Modifying generic"
+cat generic_static.m | sed -e "s/generic_static/generic/g" > generic.m
+cat generic_static.py | sed -e "s/generic_static/generic/g" > generic.py
+
+echo "Moving MPICH binaries to bin/"
+if [ -f ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec ]; then
+	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec .
+	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/hydra_pmi_proxy .
+elif [ -f ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec ]; then
+	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec .
+	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/hydra_pmi_proxy .
+else
+	echo "MPICH not found"
+	exit 1
+fi
+
+# Add/modify required libraries
+echo "Moving libgfortran to lib/"
+cp ${LIBGFORTRAN} ${LIBGFORTRAN_DIST} 2> /dev/null
+
+# Run tests
+echo "Running tests"
+cd ${ISSM_DIR}/test/NightlyRun
+
+# Check that MATLAB tests run
+echo "Running MATLAB tests"
+
+rm matlab.log 2> /dev/null
+
+# Run MATLAB tests redirecting output to logfile and suppressing output to console
+${MATLAB_PATH}/bin/matlab -nojvm -nosplash -r "try, addpath ${ISSM_DIR}/bin ${ISSM_DIR}/lib; runme(${MATLAB_NROPTIONS}); exit; catch me,fprintf('%s',getReport(me)); exit; end" -logfile matlab.log &> /dev/null
+
+# Check that MATLAB did not exit in error
+matlabExitCode=`echo $?`
+matlabExitedInError=`grep -E "Activation cannot proceed|license" matlab.log | wc -l`
+
+if [[ ${matlabExitCode} -ne 0 || ${matlabExitedInError} -ne 0 ]]; then
+	echo "----------MATLAB exited in error!----------"
+	cat matlab.log
+	echo "-----------End of matlab.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all MATLAB tests passed
+numMatlabTestsFailed=`cat matlab.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numMatlabTestsFailed} -ne 0 ]]; then
+	echo "One or more MATLAB tests FAILED"
+	exit 1;
+else
+	echo "All MATLAB tests PASSED"
+fi
+
+# Check that Python tests run
+echo "Running Python tests"
+
+export PATH="${PATH}:${ISSM_DIR}/bin"
+export PYTHONPATH="${ISSM_DIR}/src/m/dev"
+export PYTHONSTARTUP="${PYTHONPATH}/devpath.py"
+export PYTHONUNBUFFERED=1 # We don't want Python to buffer output, otherwise issm.exe output is not captured
+
+rm python.log 2> /dev/null
+./runme.py ${PYTHON_NROPTIONS} &> python.log
+
+# Check that Python did not exit in error
+pythonExitCode=`echo $?`
+pythonExitedInError=`grep -E "Error|Traceback|bad interpreter" python.log | wc -l`
+
+if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
+	echo "----------Python exited in error!----------"
+	cat python.log
+	echo "-----------End of python.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all Python tests passed
+numPythonTestsFailed=`cat python.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numPythonTestsFailed} -ne 0 ]]; then
+	echo "One or more Python tests FAILED"
+	exit 1;
+else
+	echo "All Python tests PASSED"
+fi
+
+# Create tarball
+cd ${ISSM_DIR}
+rm -f ${TARBALL}
+svn cleanup --remove-ignored --remove-unversioned test # Clean up test directory (before copying to package)
+echo "Copying assets to package: ${PACKAGE}"
+cp -rf bin examples lib scripts test ${PACKAGE}/
+echo "Cleaning up unneeded/unwanted files"
+python -m compileall ${PACKAGE}/bin # Precompile all Python scripts to bytecode
+rm -f ${PACKAGE}/bin/*.py # Remove all Python scripts
+rm -f ${PACKAGE}/bin/generic_static.* # Remove static versions of generic cluster classes
+rm -f ${PACKAGE}/lib/*.a # Remove static libraries from package
+rm -f ${PACKAGE}/lib/*.la # Remove libtool libraries from package
+echo "Creating tarball: ${TARBALL_NAME}"
+tar -czf ${TARBALL} ${PACKAGE}
+ls -lah ${ISSM_DIR}/${TARBALL}
+
+echo "Shipping binaries to website"
+
+# We're using public key authentication method to upload the tarball The
+# following lines check to see if the SSH Agent is running. If not, then it is
+# started and relevant information is forwarded to a script.
+pgrep "ssh-agent" > /dev/null
+if [ $? -ne 0 ]; then
+	echo "SSH Agent is not running. Starting it..."
+	ssh-agent > ~/.ssh/agent.sh
+else
+	echo "SSH Agent is running..."
+fi
+
+source ~/.ssh/agent.sh
+ssh-add ~/.ssh/debian_linux-vm-to-ross
+
+scp ${TARBALL} ross.ics.uci.edu:/var/www/html/${TARBALL}
+
+if [ $? -ne 0 ]; then
+	echo "The upload failed."
+	echo "Perhaps the SSH Agent was started by some other means."
+	echo "Try killing the agent and running again."
+fi
Index: /issm/trunk/packagers/linux/package-issm-linux-binaries.sh
===================================================================
--- /issm/trunk/packagers/linux/package-issm-linux-binaries.sh	(revision 24686)
+++ /issm/trunk/packagers/linux/package-issm-linux-binaries.sh	(revision 24686)
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+
+## Constants
+#
+LIBGFORTRAN="/usr/lib/x86_64-linux-gnu/libgfortran.so.5.0.0" # Important that this is the library itself
+LIBGFORTRAN_DIST="${ISSM_DIR}/lib/libgfortran.so.5" # Important the file name matches the SONAME entry in the binaries and other shared libraries which link to it
+MATLAB_NROPTIONS="'exclude',[IdFromString('Dakota'),125,126]" # Exclude Dakota tests and any tests with transient solutions that require a restart
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+PACKAGE="ISSM" # Name of directory to copy distributable files to
+TARBALL_NAME="issm-linux"
+TARBALL="${TARBALL_NAME}.tar.gz"
+
+# Clean up from previous packaging
+echo "Cleaning up existing assets"
+cd ${ISSM_DIR}
+rm -rf ${PACKAGE}
+mkdir ${PACKAGE}
+
+# Add/modify required binaries and libraries
+cd ${ISSM_DIR}/bin
+
+echo "Modify generic"
+cat generic_static.m | sed -e "s/generic_static/generic/g" > generic.m
+
+echo "Moving MPICH binaries to bin/"
+if [ -f ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec ]; then
+	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec .
+	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/hydra_pmi_proxy .
+elif [ -f ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec ]; then
+	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec .
+	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/hydra_pmi_proxy .
+else
+	echo "MPICH not found"
+	exit 1
+fi
+
+echo "Moving libgfortran to lib/"
+cp ${LIBGFORTRAN} ${LIBGFORTRAN_DIST} 2> /dev/null
+
+# Run tests
+echo "Running tests"
+cd ${ISSM_DIR}/test/NightlyRun
+
+# Check that MATLAB tests run
+echo "Running MATLAB tests"
+
+rm matlab.log 2> /dev/null
+
+# Run MATLAB tests redirecting output to logfile and suppressing output to console
+${MATLAB_PATH}/bin/matlab -nojvm -nosplash -r "try, addpath ${ISSM_DIR}/bin ${ISSM_DIR}/lib; runme(${MATLAB_NROPTIONS}); exit; catch me,fprintf('%s',getReport(me)); exit; end" -logfile matlab.log &> /dev/null
+
+# Check that MATLAB did not exit in error
+matlabExitCode=`echo $?`
+matlabExitedInError=`grep -E "Activation cannot proceed|license" matlab.log | wc -l`
+
+if [[ ${matlabExitCode} -ne 0 || ${matlabExitedInError} -ne 0 ]]; then
+	echo "----------MATLAB exited in error!----------"
+	cat matlab.log
+	echo "-----------End of matlab.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all MATLAB tests passed
+numMatlabTestsFailed=`cat matlab.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numMatlabTestsFailed} -ne 0 ]]; then
+	echo "One or more MATLAB tests FAILED"
+	exit 1;
+else
+	echo "All MATLAB tests PASSED"
+fi
+
+# Create tarball
+cd ${ISSM_DIR}
+rm -f ${TARBALL}
+svn cleanup --remove-ignored --remove-unversioned test # Clean up test directory (before copying to package)
+echo "Copying assets to package: ${PACKAGE}"
+cp -rf bin examples lib scripts test ${PACKAGE}/
+echo "Cleaning up unneeded/unwanted files"
+rm -f ${PACKAGE}/bin/generic_static.* # Remove static versions of generic cluster classes
+rm -f ${PACKAGE}/lib/*.a # Remove static libraries from package
+rm -f ${PACKAGE}/lib/*.la # Remove libtool libraries from package
+echo "Creating tarball: ${TARBALL_NAME}"
+tar -czf ${TARBALL} ${PACKAGE}
+ls -lah ${ISSM_DIR}/${TARBALL}
+
+echo "Shipping binaries to website"
+
+# We're using public key authentication method to upload the tarball. The
+# following lines check to see if the SSH Agent is running. If not, then it is
+# started and relevant information is forwarded to a script.
+pgrep "ssh-agent" > /dev/null
+if [ $? -ne 0 ]; then
+	echo "SSH Agent is not running. Starting it..."
+	ssh-agent > ~/.ssh/agent.sh
+else
+	echo "SSH Agent is running..."
+fi
+
+source ~/.ssh/agent.sh
+ssh-add ~/.ssh/debian_linux-vm-to-ross
+
+scp ${TARBALL} ross.ics.uci.edu:/var/www/html/${TARBALL}
+
+if [ $? -ne 0 ]; then
+	echo "The upload failed."
+	echo "Perhaps the SSH Agent was started by some other means."
+	echo "Try killing the agent and running again."
+fi
Index: /issm/trunk/packagers/mac/package-issm-mac-binaries-with_dakota.sh
===================================================================
--- /issm/trunk/packagers/mac/package-issm-mac-binaries-with_dakota.sh	(revision 24686)
+++ /issm/trunk/packagers/mac/package-issm-mac-binaries-with_dakota.sh	(revision 24686)
@@ -0,0 +1,155 @@
+#!/bin/bash
+
+
+## Constants
+#
+MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234 243 420 435 701 702 703]" # Exclude any tests with transient solutions that require a restart, and any tests that are excluded in the standard build
+MATLAB_PATH="/Applications/MATLAB_R2018a.app"
+PACKAGE="ISSM" # Name of directory to copy distributable files to
+
+# Exclude any tests with transient solutions that require a restart
+#
+# NOTE:
+# - 418 fails with "malloc(): invalid next size (unsorted)"
+#
+PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 243 418 420 435 701 702 703"
+TARBALL_NAME="issm-mac-with_dakota"
+TARBALL="${TARBALL_NAME}.tar.gz"
+
+# Clean up from previous packaging
+echo "Cleaning up existing assets"
+cd ${ISSM_DIR}
+rm -rf ${PACKAGE}
+mkdir ${PACKAGE}
+
+# Add/modify required binaries
+cd ${ISSM_DIR}/bin
+
+echo "Modifying generic"
+cat generic_static.m | sed -e "s/generic_static/generic/g" > generic.m
+cat generic_static.py | sed -e "s/generic_static/generic/g" > generic.py
+
+echo "Moving MPICH binaries to bin/"
+if [ -f ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec ]; then
+	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec .
+	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/hydra_pmi_proxy .
+elif [ -f ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec ]; then
+	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec .
+	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/hydra_pmi_proxy .
+else
+	echo "MPICH not found"
+	exit 1
+fi
+
+# Run tests
+echo "Running tests"
+cd ${ISSM_DIR}/test/NightlyRun
+
+# Check that MATLAB tests run
+echo "Running MATLAB tests"
+
+rm matlab.log 2> /dev/null
+
+# Run MATLAB tests redirecting output to logfile and suppressing output to console
+${MATLAB_PATH}/bin/matlab -nojvm -nosplash -r "try, addpath ${ISSM_DIR}/bin ${ISSM_DIR}/lib; runme(${MATLAB_NROPTIONS}); exit; catch me,fprintf('%s',getReport(me)); exit; end" -logfile matlab.log &> /dev/null
+
+# Check that MATLAB did not exit in error
+matlabExitCode=`echo $?`
+matlabExitedInError=`grep -E "Activation cannot proceed|license" matlab.log | wc -l`
+
+if [[ ${matlabExitCode} -ne 0 || ${matlabExitedInError} -ne 0 ]]; then
+	echo "----------MATLAB exited in error!----------"
+	cat matlab.log
+	echo "-----------End of matlab.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all MATLAB tests passed
+numMatlabTestsFailed=`cat matlab.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numMatlabTestsFailed} -ne 0 ]]; then
+	echo "One or more MATLAB tests FAILED"
+	exit 1;
+else
+	echo "All MATLAB tests PASSED"
+fi
+
+# Check that Python tests run
+echo "Running Python tests"
+
+export PATH="${PATH}:${ISSM_DIR}/bin"
+export PYTHONPATH="${ISSM_DIR}/src/m/dev"
+export PYTHONSTARTUP="${PYTHONPATH}/devpath.py"
+export PYTHONUNBUFFERED=1 # We don't want Python to buffer output, otherwise issm.exe output is not captured
+
+rm python.log 2> /dev/null
+./runme.py ${PYTHON_NROPTIONS} &> python.log
+
+# Check that Python did not exit in error
+pythonExitCode=`echo $?`
+pythonExitedInError=`grep -E "Error|Traceback|bad interpreter" python.log | wc -l`
+
+if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
+	echo "----------Python exited in error!----------"
+	cat python.log
+	echo "-----------End of python.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all Python tests passed
+numPythonTestsFailed=`cat python.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numPythonTestsFailed} -ne 0 ]]; then
+	echo "One or more Python tests FAILED"
+	exit 1;
+else
+	echo "All Python tests PASSED"
+fi
+
+# Create tarball
+cd ${ISSM_DIR}
+rm -f ${TARBALL}
+svn cleanup --remove-ignored --remove-unversioned test # Clean up test directory (before copying to package)
+echo "Copying assets to package: ${PACKAGE}"
+cp -rf bin examples lib scripts test ${PACKAGE}/
+echo "Cleaning up unneeded/unwanted files"
+python -m compileall ${PACKAGE}/bin # Precompile all Python scripts to bytecode
+rm -f ${PACKAGE}/bin/*.py # Remove all Python scripts
+rm -f ${PACKAGE}/bin/generic_static.* # Remove static versions of generic cluster classes
+rm -f ${PACKAGE}/lib/*.a # Remove static libraries from package
+rm -f ${PACKAGE}/lib/*.la # Remove libtool libraries from package
+echo "Creating tarball: ${TARBALL_NAME}"
+tar -czf ${TARBALL} ${PACKAGE}
+ls -lah ${ISSM_DIR}/${TARBALL}
+
+echo "Shipping binaries to website"
+
+# We're using public key authentication method to upload the tarball. The
+# following lines check to see if the SSH Agent is running. If not, then it is
+# started and relevant information is forwarded to a script.
+pgrep "ssh-agent" > /dev/null
+if [ $? -ne 0 ]; then
+	echo "SSH Agent is not running. Starting it..."
+	ssh-agent > ~/.ssh/agent.sh
+else
+	echo "SSH Agent is running..."
+fi
+
+source ~/.ssh/agent.sh
+ssh-add ~/.ssh/debian_linux-vm-to-ross
+
+scp ${TARBALL} ross.ics.uci.edu:/var/www/html/${TARBALL}
+
+if [ $? -ne 0 ]; then
+	echo "The upload failed."
+	echo "Perhaps the SSH Agent was started by some other means."
+	echo "Try killing the agent and running again."
+fi
Index: /issm/trunk/packagers/mac/package-issm-mac-binaries.sh
===================================================================
--- /issm/trunk/packagers/mac/package-issm-mac-binaries.sh	(revision 24686)
+++ /issm/trunk/packagers/mac/package-issm-mac-binaries.sh	(revision 24686)
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+
+## Constants
+#
+MATLAB_NROPTIONS="'exclude',[IdFromString('Dakota'),125,126,435,701,702,703]" # Exclude Dakota tests, any tests with transient solutions that require a restart, and any tests that are excluded in the standard build
+MATLAB_PATH="/Applications/MATLAB_R2018a.app"
+PACKAGE="ISSM" # Name of directory to copy distributable files to
+TARBALL_NAME="issm-mac"
+TARBALL="${TARBALL_NAME}.tar.gz"
+
+# Clean up from previous packaging
+echo "Cleaning up existing assets"
+cd ${ISSM_DIR}
+rm -rf ${PACKAGE}
+mkdir ${PACKAGE}
+
+# Add/modify required binaries and libraries
+cd ${ISSM_DIR}/bin
+
+echo "Modify generic"
+cat generic_static.m | sed -e "s/generic_static/generic/g" > generic.m
+
+echo "Moving MPICH binaries to bin/"
+if [ -f ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec ]; then
+	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/mpiexec .
+	cp ${ISSM_DIR}/externalpackages/petsc/install/bin/hydra_pmi_proxy .
+elif [ -f ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec ]; then
+	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/mpiexec .
+	cp ${ISSM_DIR}/externalpackages/mpich/install/bin/hydra_pmi_proxy .
+else
+	echo "MPICH not found"
+	exit 1
+fi
+
+# Run tests
+echo "Running tests"
+cd ${ISSM_DIR}/test/NightlyRun
+
+# Check that MATLAB tests run
+echo "Running MATLAB tests"
+
+rm matlab.log 2> /dev/null
+
+# Run MATLAB tests redirecting output to logfile and suppressing output to console
+${MATLAB_PATH}/bin/matlab -nojvm -nosplash -r "try, addpath ${ISSM_DIR}/bin ${ISSM_DIR}/lib; runme(${MATLAB_NROPTIONS}); exit; catch me,fprintf('%s',getReport(me)); exit; end" -logfile matlab.log &> /dev/null
+
+# Check that MATLAB did not exit in error
+matlabExitCode=`echo $?`
+matlabExitedInError=`grep -E "Activation cannot proceed|license" matlab.log | wc -l`
+
+if [[ ${matlabExitCode} -ne 0 || ${matlabExitedInError} -ne 0 ]]; then
+	echo "----------MATLAB exited in error!----------"
+	cat matlab.log
+	echo "-----------End of matlab.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all MATLAB tests passed
+numMatlabTestsFailed=`cat matlab.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numMatlabTestsFailed} -ne 0 ]]; then
+	echo "One or more MATLAB tests FAILED"
+	exit 1;
+else
+	echo "All MATLAB tests PASSED"
+fi
+
+# Create tarball
+cd ${ISSM_DIR}
+rm -f ${TARBALL}
+svn cleanup --remove-ignored --remove-unversioned test # Clean up test directory (before copying to package)
+echo "Copying assets to package: ${PACKAGE}"
+cp -rf bin examples lib scripts test ${PACKAGE}/
+echo "Cleaning up unneeded/unwanted files"
+rm -f ${PACKAGE}/bin/generic_static.* # Remove static versions of generic cluster classes
+rm -f ${PACKAGE}/lib/*.a # Remove static libraries from package (we only need MEX-files)
+rm -f ${PACKAGE}/lib/*.la # Remove libtool libraries from package
+echo "Creating tarball: ${TARBALL_NAME}"
+tar -czf ${TARBALL} ${PACKAGE}
+ls -lah ${ISSM_DIR}/${TARBALL}
+
+echo "Shipping binaries to website"
+
+# We're using public key authentication method to upload the tarball. The
+# following lines check to see if the SSH Agent is running. If not, then it is
+# started and relevant information is forwarded to a script.
+pgrep "ssh-agent" > /dev/null
+if [ $? -ne 0 ]; then
+	echo "SSH Agent is not running. Starting it..."
+	ssh-agent > ~/.ssh/agent.sh
+else
+	echo "SSH Agent is running..."
+fi
+
+source ~/.ssh/agent.sh
+ssh-add ~/.ssh/debian_linux-vm-to-ross
+
+scp ${TARBALL} ross.ics.uci.edu:/var/www/html/${TARBALL}
+
+if [ $? -ne 0 ]; then
+	echo "The upload failed."
+	echo "Perhaps the SSH Agent was started by some other means."
+	echo "Try killing the agent and running again."
+fi
Index: /issm/trunk/scripts/devpath.py
===================================================================
--- /issm/trunk/scripts/devpath.py	(revision 24686)
+++ /issm/trunk/scripts/devpath.py	(revision 24686)
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# NOTE: This script is a stripped-down version of
+#       $ISSM_DIR/src/m/dev/devpath.py and is intended only for loading ISSM in
+#       order to test our distributable packages. It assumes the following is
+#       set before $ISSM_DIR/test/NightlyRun/runme.py is called,
+#
+#           export ISSM_DIR=</path/to/ISSM>
+#			export PATH="${PATH}:${ISSM_DIR}/bin"
+#           export PYTHONPATH="${ISSM_DIR}/scripts"
+#           export PYTHONSTARTUP="${PYTHONPATH}/devpath.py"
+#           export PYTHONUNBUFFERED=1
+#
+
+import os
+import sys
+
+ISSM_DIR = os.getenv('ISSM_DIR')
+sys.path.append(ISSM_DIR + '/bin')
+sys.path.append(ISSM_DIR + '/lib')
+
+from issmversion import issmversion
Index: /issm/trunk/scripts/test-issm-linux-binaries-with_dakota.sh
===================================================================
--- /issm/trunk/scripts/test-issm-linux-binaries-with_dakota.sh	(revision 24686)
+++ /issm/trunk/scripts/test-issm-linux-binaries-with_dakota.sh	(revision 24686)
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+# This script is intended to test binaries downloaded to a user-end machine.
+#
+# NOTE: Tarball must already exist in INSTALL_DIR
+#
+
+MATLAB_NROPTIONS="'id',[IdFromString('Dakota')],'exclude',[234,244,250,417,444,445]"  # Exclude any tests with transient solutions that require a restart
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+INSTALL_DIR=~/Downloads
+PACKAGE_NAME="ISSM"
+
+# Exclude any tests with transient solutions that require a restart
+#
+# NOTE:
+# - All non-excluded tests were running until recent changes to QMU
+# - 418 fails with "malloc(): invalid next size (unsorted)"
+#
+PYTHON_NROPTIONS="--include_name 'Dakota' --exclude 234 244 250 417 418 444 445"
+TARBALL_NAME="issm-linux-with_dakota"
+TARBALL="${TARBALL_NAME}.tar.gz"
+
+cd ${INSTALL_DIR}
+rm -rf ${PACKAGE_NAME}
+tar -zxvf ${TARBALL}
+cd ${PACKAGE_NAME}/test/NightlyRun
+
+# Check that MATLAB tests run
+echo "Running MATLAB tests"
+rm matlab.log 2> /dev/null
+
+# Run MATLAB tests redirecting output to logfile and suppressing output to console
+${MATLAB_PATH}/bin/matlab -nojvm -nosplash -r "try, addpath ../../bin; addpath ../../lib; runme(${MATLAB_NROPTIONS}); exit; catch me,fprintf('%s',getReport(me)); exit; end" -logfile matlab.log &> /dev/null
+
+# Check that MATLAB did not exit in error
+matlabExitCode=`echo $?`
+matlabExitedInError=`grep -E "Activation cannot proceed|license|Error" matlab.log | wc -l`
+
+if [[ ${matlabExitCode} -ne 0 || ${matlabExitedInError} -ne 0 ]]; then
+	echo "----------MATLAB exited in error!----------"
+	cat matlab.log
+	echo "-----------End of matlab.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all MATLAB tests passed
+numMatlabTestsFailed=`cat matlab.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numMatlabTestsFailed} -ne 0 ]]; then
+	echo "One or more MATLAB tests FAILED"
+	exit 1;
+else
+	echo "All MATLAB tests PASSED"
+fi
+
+# Check that Python tests run
+echo "Running Python tests"
+
+export ISSM_DIR="${INSTALL_DIR}"
+export PATH="${PATH}:${ISSM_DIR}/bin"
+export PYTHONPATH="${ISSM_DIR}/scripts"
+export PYTHONSTARTUP="${PYTHONPATH}/devpath.py"
+export PYTHONUNBUFFERED=1 # We don't want Python to buffer output, otherwise issm.exe output is not captured
+
+rm python.log 2> /dev/null
+./runme.py ${PYTHON_NROPTIONS} &> python.log
+
+# Check that Python did not exit in error
+pythonExitCode=`echo $?`
+pythonExitedInError=`grep -E "runme.py: error" python.log | wc -l`
+
+if [[ ${pythonExitCode} -ne 0 || ${pythonExitedInError} -ne 0 ]]; then
+	echo "----------Python exited in error!----------"
+	cat python.log
+	echo "-----------End of python.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all Python tests passed
+numPythonTestsFailed=`cat python.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numPythonTestsFailed} -ne 0 ]]; then
+	echo "One or more Python tests FAILED"
+	exit 1;
+else
+	echo "All Python tests PASSED"
+fi
Index: /issm/trunk/scripts/test-issm-linux-binaries.sh
===================================================================
--- /issm/trunk/scripts/test-issm-linux-binaries.sh	(revision 24686)
+++ /issm/trunk/scripts/test-issm-linux-binaries.sh	(revision 24686)
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# This script is intended to test binaries downloaded to a user-end machine.
+#
+# NOTE: Tarball must already exist in INSTALL_DIR
+#
+
+MATLAB_NROPTIONS="'exclude',[IdFromString('Dakota'),125,126]" # Exclude Dakota tests and any tests with transient solutions that require a restart
+MATLAB_PATH="/usr/local/MATLAB/R2019b"
+INSTALL_DIR=~/Downloads
+PACKAGE_NAME="ISSM"
+TARBALL_NAME="issm-linux"
+TARBALL="${TARBALL_NAME}.tar.gz"
+
+cd ${INSTALL_DIR}
+rm -rf ${PACKAGE_NAME}
+tar -zxvf ${TARBALL}
+cd ${PACKAGE_NAME}/test/NightlyRun
+
+# Check that MATLAB tests run
+echo "Running MATLAB tests"
+rm matlab.log 2> /dev/null
+
+# Run MATLAB tests redirecting output to logfile and suppressing output to console
+${MATLAB_PATH}/bin/matlab -nojvm -nosplash -r "try, addpath ../../bin; addpath ../../lib; runme(${MATLAB_NROPTIONS}); exit; catch me,fprintf('%s',getReport(me)); exit; end" -logfile matlab.log &> /dev/null
+
+# Check that MATLAB did not exit in error
+matlabExitCode=`echo $?`
+matlabExitedInError=`grep -E "Activation cannot proceed|license|Error" matlab.log | wc -l`
+
+if [[ ${matlabExitCode} -ne 0 || ${matlabExitedInError} -ne 0 ]]; then
+	echo "----------MATLAB exited in error!----------"
+	cat matlab.log
+	echo "-----------End of matlab.log-----------"
+
+	# Clean up execution directory
+	rm -rf ${ISSM_DIR}/execution/*
+
+	exit 1
+fi
+
+# Check that all MATLAB tests passed
+numMatlabTestsFailed=`cat matlab.log | grep -c -E "FAILED|ERROR"`
+
+if [[ ${numMatlabTestsFailed} -ne 0 ]]; then
+	echo "One or more MATLAB tests FAILED"
+	exit 1;
+else
+	echo "All MATLAB tests PASSED"
+fi
Index: /issm/trunk/src/c/Makefile.am
===================================================================
--- /issm/trunk/src/c/Makefile.am	(revision 24685)
+++ /issm/trunk/src/c/Makefile.am	(revision 24686)
@@ -8,10 +8,10 @@
 #Library declaration {{{
 if !WINDOWS
-lib_LTLIBRARIES = libISSMCore.la libISSMOverload.la 
+lib_LTLIBRARIES = libISSMCore.la libISSMOverload.la
 if WRAPPERS
 lib_LTLIBRARIES += libISSMModules.la
 endif
 else
-noinst_LTLIBRARIES = libISSMCore.la libISSMOverload.la 
+noinst_LTLIBRARIES = libISSMCore.la libISSMOverload.la
 if WRAPPERS
 noinst_LTLIBRARIES += libISSMModules.la
@@ -22,41 +22,43 @@
 #Core sources
 #BAMG sources  {{{
-issm_sources = 
+issm_sources =
 if BAMG
-issm_sources += ./bamg/BamgGeom.cpp\
-					 ./bamg/BamgMesh.cpp\
-					 ./bamg/BamgOpts.cpp\
-					 ./bamg/CrackedEdge.cpp\
-					 ./bamg/Curve.cpp\
-					 ./bamg/Edge.cpp\
-					 ./bamg/GeomEdge.cpp\
-					 ./bamg/GeomSubDomain.cpp\
-					 ./bamg/GeomVertex.cpp\
-					 ./bamg/Geometry.cpp\
-					 ./bamg/ListofIntersectionTriangles.cpp\
-					 ./bamg/EigenMetric.cpp\
-					 ./bamg/Metric.cpp\
-					 ./bamg/BamgQuadtree.cpp\
-					 ./bamg/SetOfE4.cpp\
-					 ./bamg/SubDomain.cpp\
-					 ./bamg/AdjacentTriangle.cpp\
-					 ./bamg/Triangle.cpp\
-					 ./bamg/BamgVertex.cpp\
-					 ./bamg/VertexOnEdge.cpp\
-					 ./bamg/VertexOnGeom.cpp\
-					 ./bamg/VertexOnVertex.cpp\
-					 ./bamg/Mesh.cpp\
-					 ./shared/Bamg/BigPrimeNumber.cpp\
-					 ./modules/Bamgx/Bamgx.cpp\
-					 ./modules/BamgConvertMeshx/BamgConvertMeshx.cpp\
-					 ./modules/BamgTriangulatex/BamgTriangulatex.cpp
+issm_sources += \
+	./bamg/BamgGeom.cpp \
+	./bamg/BamgMesh.cpp \
+	./bamg/BamgOpts.cpp \
+	./bamg/CrackedEdge.cpp \
+	./bamg/Curve.cpp \
+	./bamg/Edge.cpp \
+	./bamg/GeomEdge.cpp \
+	./bamg/GeomSubDomain.cpp \
+	./bamg/GeomVertex.cpp \
+	./bamg/Geometry.cpp \
+	./bamg/ListofIntersectionTriangles.cpp \
+	./bamg/EigenMetric.cpp \
+	./bamg/Metric.cpp \
+	./bamg/BamgQuadtree.cpp \
+	./bamg/SetOfE4.cpp \
+	./bamg/SubDomain.cpp \
+	./bamg/AdjacentTriangle.cpp \
+	./bamg/Triangle.cpp \
+	./bamg/BamgVertex.cpp \
+	./bamg/VertexOnEdge.cpp \
+	./bamg/VertexOnGeom.cpp \
+	./bamg/VertexOnVertex.cpp \
+	./bamg/Mesh.cpp \
+	./shared/Bamg/BigPrimeNumber.cpp \
+	./modules/Bamgx/Bamgx.cpp \
+	./modules/BamgConvertMeshx/BamgConvertMeshx.cpp \
+	./modules/BamgTriangulatex/BamgTriangulatex.cpp
 
 #do not include AmrBamg with AD
 if ADOLC
-issm_sources +=./shared/Numerics/isnan.cpp \
-					./shared/MemOps/MemOps.cpp
+issm_sources += \
+	./shared/Numerics/isnan.cpp \
+	./shared/MemOps/MemOps.cpp
 else
 if CODIPACK
-issm_sources +=./shared/Numerics/isnan.cpp
+issm_sources += ./shared/Numerics/isnan.cpp
 else
 issm_sources += ./classes/AmrBamg.cpp
@@ -66,265 +68,265 @@
 #}}}
 #Core sources{{{
-issm_sources += ./datastructures/DataSet.cpp\
-					./classes/gauss/GaussSeg.cpp\
-					./classes/gauss/GaussTria.cpp\
-					./classes/gauss/GaussTetra.cpp\
-					./classes/gauss/GaussPenta.cpp\
-					./classes/IoModel.cpp\
-					./classes/FemModel.cpp\
-					./classes/Loads/Friction.cpp\
-					./classes/Inputs/TransientInput.cpp\
-					./classes/Constraints/SpcTransient.cpp\
-					./classes/DependentObject.cpp\
-					./classes/Contours.cpp\
-					./classes/Vertices.cpp\
-					./classes/Nodes.cpp\
-					./classes/Numberedcostfunction.cpp\
-					./classes/Misfit.cpp\
-					./classes/Cfsurfacesquare.cpp\
-					./classes/Cfdragcoeffabsgrad.cpp\
-					./classes/Cfsurfacelogvel.cpp\
-					./classes/Regionaloutput.cpp\
-					./classes/Nodalvalue.cpp\
-					./classes/Node.cpp\
-					./classes/Vertex.cpp\
-					./classes/Hook.cpp\
-					./classes/Radar.cpp\
-					./classes/ExternalResults/Results.cpp\
-					./classes/Elements/Element.cpp\
-					./classes/Elements/Elements.cpp\
-					./classes/Elements/ElementHook.cpp\
-					./classes/Elements/Seg.cpp\
-					./classes/Elements/SegRef.cpp\
-					./classes/Elements/Tria.cpp\
-					./classes/Elements/TriaRef.cpp\
-					./classes/Elements/Tetra.cpp\
-					./classes/Elements/TetraRef.cpp\
-					./classes/Elements/Penta.cpp\
-					./classes/Elements/PentaRef.cpp\
-					./classes/Inputs/Inputs.cpp\
-					./classes/Inputs/SegInput.cpp\
-					./classes/Inputs/TriaInput.cpp\
-					./classes/Inputs/BoolInput.cpp\
-					./classes/Inputs/IntInput.cpp\
-					./classes/Inputs/DoubleInput.cpp\
-					./classes/Inputs/DoubleArrayInput.cpp\
-					./classes/Inputs/DatasetInput.cpp\
-					./classes/Materials/Materials.cpp\
-					./classes/Materials/Matice.cpp\
-					./classes/Materials/Matlitho.cpp\
-					./classes/Materials/Matestar.cpp\
-					./classes/Constraints/Constraints.cpp\
-					./classes/Constraints/SpcStatic.cpp\
-					./classes/Constraints/SpcDynamic.cpp\
-					./classes/Loads/Channel.cpp\
-					./classes/Loads/Loads.cpp\
-					./classes/Loads/Penpair.cpp\
-					./classes/Loads/Pengrid.cpp\
-					./classes/Loads/Moulin.cpp\
-					./classes/Loads/Numericalflux.cpp\
-					./classes/Loads/Neumannflux.cpp\
-					./classes/matrix/ElementMatrix.cpp\
-					./classes/matrix/ElementVector.cpp\
-					./classes/Params/Parameters.cpp\
-					./classes/Params/BoolParam.cpp\
-					./classes/Params/IntParam.cpp\
-					./classes/Params/IntVecParam.cpp\
-					./classes/Params/IntMatParam.cpp\
-					./classes/Params/DoubleParam.cpp\
-					./classes/Params/FileParam.cpp\
-					./classes/Params/StringArrayParam.cpp\
-					./classes/Params/DoubleMatParam.cpp\
-					./classes/Params/DoubleTransientMatParam.cpp\
-					./classes/Params/DoubleMatArrayParam.cpp\
-					./classes/Params/DoubleVecParam.cpp\
-					./classes/Params/StringParam.cpp\
-					./classes/Params/MatrixParam.cpp\
-					./classes/Params/VectorParam.cpp\
-					./classes/Params/TransientParam.cpp\
-					./classes/Params/TransientArrayParam.cpp\
-					./classes/Params/DataSetParam.cpp\
-					./classes/Profiler.cpp\
-					./shared/Matrix/MatrixUtils.cpp\
-					./shared/io/Disk/pfopen.cpp\
-					./shared/io/Disk/pfclose.cpp\
-					./shared/io/Disk/WriteLockFile.cpp\
-					./shared/io/Print/PrintfFunction.cpp\
-					./shared/io/Comm/IssmComm.cpp\
-					./shared/io/Marshalling/IoCodeConversions.cpp \
-					./shared/LatLong/Ll2xyx.cpp\
-					./shared/LatLong/Xy2llx.cpp\
-					./shared/FSanalyticals/fsanalyticals.cpp\
-					./shared/Enum/EnumToStringx.cpp\
-					./shared/Enum/StringToEnumx.cpp\
-					./shared/Numerics/Verbosity.cpp\
-					./shared/Numerics/GaussPoints.cpp\
-					./shared/Numerics/cross.cpp\
-					./shared/Numerics/cubic.cpp\
-					./shared/Numerics/NewtonSolveDnorm.cpp\
-					./shared/Numerics/ODE1.cpp\
-					./shared/Numerics/extrema.cpp\
-					./shared/Numerics/legendre.cpp\
-					./shared/Numerics/XZvectorsToCoordinateSystem.cpp\
-					./shared/Exceptions/Exceptions.cpp\
-					./shared/Sorting/binary_search.cpp\
-					./shared/Elements/Cuffey.cpp\
-					./shared/Elements/BuddJacka.cpp\
-					./shared/Elements/CuffeyTemperate.cpp\
-					./shared/Elements/StressIntensityIntegralWeight.cpp\
-					./shared/Elements/Paterson.cpp\
-					./shared/Elements/Arrhenius.cpp\
-					./shared/Elements/NyeCO2.cpp\
-					./shared/Elements/NyeH2O.cpp\
-					./shared/Elements/LliboutryDuval.cpp \
-					./shared/Elements/PrintArrays.cpp\
-					./shared/Elements/PddSurfaceMassBalance.cpp\
-					./shared/Elements/PddSurfaceMassBalanceSicopolis.cpp\
-					./shared/Elements/ComputeDelta18oTemperaturePrecipitation.cpp\
-					./shared/Elements/ComputeMungsmTemperaturePrecipitation.cpp\
-					./shared/Elements/ComputeD18OTemperaturePrecipitationFromPD.cpp\
-					./shared/Elements/DrainageFunctionWaterfraction.cpp\
-					./shared/Elements/EstarComponents.cpp\
-					./shared/String/DescriptorIndex.cpp\
-					./toolkits/issm/IssmToolkitUtils.cpp\
-					./toolkits/issm/IssmSolver.cpp\
-					./toolkits/mpi/issmmpi.cpp\
-					./toolkits/mpi/commops/DetermineLocalSize.cpp\
-					./toolkits/mpi/commops/DetermineGlobalSize.cpp\
-					./toolkits/mpi/commops/DetermineRowRankFromLocalSize.cpp\
-					./toolkits/mpi/commops/GetOwnershipBoundariesFromRange.cpp\
-					./toolkits/ToolkitOptions.cpp\
-					./modules/ModelProcessorx/ModelProcessorx.cpp\
-					./modules/ModelProcessorx/ElementsAndVerticesPartitioning.cpp\
-					./modules/ModelProcessorx/NodesPartitioning.cpp\
-					./modules/ModelProcessorx/EdgesPartitioning.cpp\
-					./modules/ModelProcessorx/FacesPartitioning.cpp\
-					./modules/ModelProcessorx/CreateParameters.cpp\
-					./modules/ModelProcessorx/Autodiff/CreateParametersAutodiff.cpp\
-					./modules/ModelProcessorx/CreateFaces.cpp\
-					./modules/ModelProcessorx/CreateEdges.cpp\
-					./modules/ModelProcessorx/CreateSingleNodeToElementConnectivity.cpp\
-					./modules/ModelProcessorx/CreateNumberNodeToElementConnectivity.cpp\
-					./modules/ModelProcessorx/CreateElementsVerticesAndMaterials.cpp\
-					./modules/ModelProcessorx/CreateNodes.cpp\
-					./modules/ParseToolkitsOptionsx/ParseToolkitsOptionsx.cpp\
-					./modules/NodesDofx/NodesDofx.cpp\
-					./modules/NodalValuex/NodalValuex.cpp\
-					./modules/VertexCoordinatesx/VertexCoordinatesx.cpp\
-					./modules/OutputResultsx/OutputResultsx.cpp\
-					./modules/InputDepthAverageAtBasex/InputDepthAverageAtBasex.cpp\
-					./modules/InputDuplicatex/InputDuplicatex.cpp\
-					./modules/InputExtrudex/InputExtrudex.cpp\
-					./modules/SurfaceAreax/SurfaceAreax.cpp\
-					./modules/AllocateSystemMatricesx/AllocateSystemMatricesx.cpp\
-					./modules/CreateJacobianMatrixx/CreateJacobianMatrixx.cpp\
-					./modules/SystemMatricesx/SystemMatricesx.cpp\
-					./modules/CreateNodalConstraintsx/CreateNodalConstraintsx.cpp\
-					./modules/UpdateDynamicConstraintsx/UpdateDynamicConstraintsx.cpp\
-					./modules/IoModelToConstraintsx/IoModelToConstraintsx.cpp\
-					./modules/SetActiveNodesLSMx/SetActiveNodesLSMx.cpp\
-					./modules/InputUpdateFromConstantx/InputUpdateFromConstantx.cpp\
-					./modules/InputUpdateFromSolutionx/InputUpdateFromSolutionx.cpp\
-					./modules/GeothermalFluxx/GeothermalFluxx.cpp\
-					./modules/GetSolutionFromInputsx/GetSolutionFromInputsx.cpp\
-					./modules/GetVectorFromInputsx/GetVectorFromInputsx.cpp\
-					./modules/InputUpdateFromVectorx/InputUpdateFromVectorx.cpp\
-					./modules/FloatingiceMeltingRatex/FloatingiceMeltingRatex.cpp\
-					./modules/FloatingiceMeltingRatePicox/FloatingiceMeltingRatePicox.cpp\
-					./modules/FrontalForcingsx/FrontalForcingsx.cpp\
-					./modules/ConfigureObjectsx/ConfigureObjectsx.cpp\
-					./modules/SpcNodesx/SpcNodesx.cpp\
-					./modules/SurfaceMassBalancex/SurfaceMassBalancex.cpp\
-					./modules/SurfaceMassBalancex/Gembx.cpp\
-					./modules/Reducevectorgtofx/Reducevectorgtofx.cpp\
-					./modules/Reduceloadx/Reduceloadx.cpp\
-					./modules/ConstraintsStatex/ConstraintsStatex.cpp\
-					./modules/ResetConstraintsx/ResetConstraintsx.cpp\
-					./modules/ResetFSBasalBoundaryConditionx/ResetFSBasalBoundaryConditionx.cpp\
-					./modules/Solverx/Solverx.cpp\
-					./modules/Mergesolutionfromftogx/Mergesolutionfromftogx.cpp\
-					./cores/ProcessArguments.cpp\
-					./cores/ResetBoundaryConditions.cpp\
-					./cores/WrapperCorePointerFromSolutionEnum.cpp\
-					./cores/CorePointerFromSolutionEnum.cpp\
-					./cores/ad_core.cpp\
-					./cores/adgradient_core.cpp\
-					./main/EnvironmentInit.cpp\
-					./main/EnvironmentFinalize.cpp\
-					./analyses/EnumToAnalysis.cpp\
-					./solutionsequences/solutionsequence_la.cpp\
-					./solutionsequences/solutionsequence_la_theta.cpp\
-					./solutionsequences/solutionsequence_linear.cpp\
-					./solutionsequences/solutionsequence_nonlinear.cpp\
-					./solutionsequences/solutionsequence_newton.cpp\
-					./solutionsequences/solutionsequence_fct.cpp\
-					./solutionsequences/solutionsequence_schurcg.cpp\
-					./solutionsequences/convergence.cpp\
-					./classes/Options/Options.cpp\
-					./classes/Options/OptionUtilities.cpp\
-					./classes/RiftStruct.cpp\
-					./modules/ModelProcessorx/Transient/UpdateElementsTransient.cpp \
-					./cores/transient_core.cpp\
-					./cores/steadystate_core.cpp\
-					./cores/masstransport_core.cpp\
-					./cores/depthaverage_core.cpp\
-					./cores/extrudefrombase_core.cpp\
-					./cores/extrudefromtop_core.cpp\
-					./cores/thermal_core.cpp\
-					./cores/smb_core.cpp\
-					./cores/bmb_core.cpp\
-					./solutionsequences/solutionsequence_thermal_nonlinear.cpp\
-					./modules/ControlInputSetGradientx/ControlInputSetGradientx.cpp\
-					./modules/GetVectorFromControlInputsx/GetVectorFromControlInputsx.cpp\
-					./modules/SetControlInputsFromVectorx/SetControlInputsFromVectorx.cpp\
-					./modules/ModelProcessorx/Control/CreateParametersControl.cpp\
-					./modules/ModelProcessorx/Control/UpdateElementsAndMaterialsControl.cpp\
-					./modules/SurfaceAbsVelMisfitx/SurfaceAbsVelMisfitx.cpp\
-					./modules/SurfaceRelVelMisfitx/SurfaceRelVelMisfitx.cpp\
-					./modules/SurfaceLogVelMisfitx/SurfaceLogVelMisfitx.cpp\
-					./modules/SurfaceLogVxVyMisfitx/SurfaceLogVxVyMisfitx.cpp\
-					./modules/SurfaceAverageVelMisfitx/SurfaceAverageVelMisfitx.cpp\
-					./modules/ThicknessAbsMisfitx/ThicknessAbsMisfitx.cpp\
-					./modules/Gradjx/Gradjx.cpp\
-					./modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp\
-					./modules/ThicknessAlongGradientx/ThicknessAlongGradientx.cpp\
-					./modules/ThicknessAcrossGradientx/ThicknessAcrossGradientx.cpp\
-					./modules/RheologyBbarAbsGradientx/RheologyBbarAbsGradientx.cpp\
-					./modules/RheologyBAbsGradientx/RheologyBAbsGradientx.cpp\
-					./classes/Inputs/ControlInput.cpp\
-					./shared/Numerics/BrentSearch.cpp\
-					./cores/control_core.cpp\
-					./cores/controltao_core.cpp\
-					./cores/controlm1qn3_core.cpp\
-					./cores/controladm1qn3_core.cpp\
-					./cores/controlvalidation_core.cpp\
-					./cores/adjointstressbalance_core.cpp\
-					./cores/adjointbalancethickness_core.cpp\
-					./cores/adjointbalancethickness2_core.cpp\
-					./cores/AdjointCorePointerFromSolutionEnum.cpp\
-					./solutionsequences/solutionsequence_adjoint_linear.cpp\
-					./cores/hydrology_core.cpp\
-					./solutionsequences/solutionsequence_hydro_nonlinear.cpp\
-					./solutionsequences/solutionsequence_shakti_nonlinear.cpp\
-					./solutionsequences/solutionsequence_glads_nonlinear.cpp\
-					./cores/stressbalance_core.cpp\
-					./solutionsequences/solutionsequence_stokescoupling_nonlinear.cpp\
-					./cores/balancethickness_core.cpp \
-					./cores/balancethickness2_core.cpp \
-					./cores/balancevelocity_core.cpp \
-					./cores/dummy_core.cpp\
-					./cores/surfaceslope_core.cpp\
-					./cores/bedslope_core.cpp\
-					./cores/damage_core.cpp\
-					./cores/levelsetfunctionslope_core.cpp\
-					./cores/movingfront_core.cpp\
-					./modules/GroundinglineMigrationx/GroundinglineMigrationx.cpp\
-					./classes/Loads/Riftfront.cpp\
-					./modules/ConstraintsStatex/RiftConstraintsState.cpp\
-					./modules/ModelProcessorx/CreateOutputDefinitions.cpp\
-					./modules/OutputDefinitionsResponsex/OutputDefinitionsResponsex.cpp\
-					./modules/InterpFromMeshToMesh2dx/InterpFromMeshToMesh2dx.cpp\
-					./classes/Inputs/PentaInput.cpp\
-					./classes/Inputs/TetraInput.cpp
+issm_sources += \
+	./datastructures/DataSet.cpp \
+	./classes/gauss/GaussSeg.cpp \
+	./classes/gauss/GaussTria.cpp \
+	./classes/gauss/GaussTetra.cpp \
+	./classes/gauss/GaussPenta.cpp \
+	./classes/IoModel.cpp \
+	./classes/FemModel.cpp \
+	./classes/Loads/Friction.cpp \
+	./classes/Constraints/SpcTransient.cpp \
+	./classes/DependentObject.cpp \
+	./classes/Contours.cpp \
+	./classes/Vertices.cpp \
+	./classes/Nodes.cpp \
+	./classes/Numberedcostfunction.cpp \
+	./classes/Misfit.cpp \
+	./classes/Cfsurfacesquare.cpp \
+	./classes/Cfdragcoeffabsgrad.cpp \
+	./classes/Cfsurfacelogvel.cpp \
+	./classes/Regionaloutput.cpp \
+	./classes/Nodalvalue.cpp \
+	./classes/Node.cpp \
+	./classes/Vertex.cpp \
+	./classes/Hook.cpp \
+	./classes/Radar.cpp \
+	./classes/ExternalResults/Results.cpp \
+	./classes/Elements/Element.cpp \
+	./classes/Elements/Elements.cpp \
+	./classes/Elements/ElementHook.cpp \
+	./classes/Elements/Seg.cpp \
+	./classes/Elements/SegRef.cpp \
+	./classes/Elements/Tria.cpp \
+	./classes/Elements/TriaRef.cpp \
+	./classes/Elements/Tetra.cpp \
+	./classes/Elements/TetraRef.cpp \
+	./classes/Elements/Penta.cpp \
+	./classes/Elements/PentaRef.cpp \
+	./classes/Materials/Materials.cpp \
+	./classes/Materials/Matice.cpp \
+	./classes/Materials/Matlitho.cpp \
+	./classes/Materials/Matestar.cpp \
+	./classes/Constraints/Constraints.cpp \
+	./classes/Constraints/SpcStatic.cpp \
+	./classes/Constraints/SpcDynamic.cpp \
+	./classes/Loads/Channel.cpp \
+	./classes/Loads/Loads.cpp \
+	./classes/Loads/Penpair.cpp \
+	./classes/Loads/Pengrid.cpp \
+	./classes/Loads/Moulin.cpp \
+	./classes/Loads/Numericalflux.cpp \
+	./classes/Loads/Neumannflux.cpp \
+	./classes/matrix/ElementMatrix.cpp \
+	./classes/matrix/ElementVector.cpp \
+	./classes/Params/Parameters.cpp \
+	./classes/Params/BoolParam.cpp \
+	./classes/Params/IntParam.cpp \
+	./classes/Params/IntVecParam.cpp \
+	./classes/Params/IntMatParam.cpp \
+	./classes/Params/DoubleParam.cpp \
+	./classes/Params/FileParam.cpp \
+	./classes/Params/StringArrayParam.cpp \
+	./classes/Params/DoubleMatParam.cpp \
+	./classes/Params/DoubleTransientMatParam.cpp \
+	./classes/Params/DoubleMatArrayParam.cpp \
+	./classes/Params/DoubleVecParam.cpp \
+	./classes/Params/StringParam.cpp \
+	./classes/Params/MatrixParam.cpp \
+	./classes/Params/VectorParam.cpp \
+	./classes/Params/TransientParam.cpp \
+	./classes/Params/TransientArrayParam.cpp \
+	./classes/Params/DataSetParam.cpp \
+	./classes/Profiler.cpp \
+	./shared/Matrix/MatrixUtils.cpp \
+	./shared/io/Disk/pfopen.cpp \
+	./shared/io/Disk/pfclose.cpp \
+	./shared/io/Disk/WriteLockFile.cpp \
+	./shared/io/Print/PrintfFunction.cpp \
+	./shared/io/Comm/IssmComm.cpp \
+	./shared/io/Marshalling/IoCodeConversions.cpp \
+	./shared/LatLong/Ll2xyx.cpp \
+	./shared/LatLong/Xy2llx.cpp \
+	./shared/FSanalyticals/fsanalyticals.cpp \
+	./shared/Enum/EnumToStringx.cpp \
+	./shared/Enum/StringToEnumx.cpp \
+	./shared/Numerics/Verbosity.cpp \
+	./shared/Numerics/GaussPoints.cpp \
+	./shared/Numerics/cross.cpp \
+	./shared/Numerics/cubic.cpp \
+	./shared/Numerics/NewtonSolveDnorm.cpp \
+	./shared/Numerics/ODE1.cpp \
+	./shared/Numerics/extrema.cpp \
+	./shared/Numerics/legendre.cpp \
+	./shared/Numerics/XZvectorsToCoordinateSystem.cpp \
+	./shared/Exceptions/Exceptions.cpp \
+	./shared/Sorting/binary_search.cpp \
+	./shared/Elements/Cuffey.cpp \
+	./shared/Elements/BuddJacka.cpp \
+	./shared/Elements/CuffeyTemperate.cpp \
+	./shared/Elements/StressIntensityIntegralWeight.cpp \
+	./shared/Elements/Paterson.cpp \
+	./shared/Elements/Arrhenius.cpp \
+	./shared/Elements/NyeCO2.cpp \
+	./shared/Elements/NyeH2O.cpp \
+	./shared/Elements/LliboutryDuval.cpp \
+	./shared/Elements/PrintArrays.cpp \
+	./shared/Elements/PddSurfaceMassBalance.cpp \
+	./shared/Elements/PddSurfaceMassBalanceSicopolis.cpp \
+	./shared/Elements/ComputeDelta18oTemperaturePrecipitation.cpp \
+	./shared/Elements/ComputeMungsmTemperaturePrecipitation.cpp \
+	./shared/Elements/ComputeD18OTemperaturePrecipitationFromPD.cpp \
+	./shared/Elements/DrainageFunctionWaterfraction.cpp \
+	./shared/Elements/EstarComponents.cpp \
+	./shared/String/DescriptorIndex.cpp \
+	./toolkits/issm/IssmToolkitUtils.cpp \
+	./toolkits/issm/IssmSolver.cpp \
+	./toolkits/mpi/issmmpi.cpp \
+	./toolkits/mpi/commops/DetermineLocalSize.cpp \
+	./toolkits/mpi/commops/DetermineGlobalSize.cpp \
+	./toolkits/mpi/commops/DetermineRowRankFromLocalSize.cpp \
+	./toolkits/mpi/commops/GetOwnershipBoundariesFromRange.cpp \
+	./toolkits/ToolkitOptions.cpp \
+	./modules/ModelProcessorx/ModelProcessorx.cpp \
+	./modules/ModelProcessorx/ElementsAndVerticesPartitioning.cpp \
+	./modules/ModelProcessorx/NodesPartitioning.cpp \
+	./modules/ModelProcessorx/EdgesPartitioning.cpp \
+	./modules/ModelProcessorx/FacesPartitioning.cpp \
+	./modules/ModelProcessorx/CreateParameters.cpp \
+	./modules/ModelProcessorx/Autodiff/CreateParametersAutodiff.cpp \
+	./modules/ModelProcessorx/CreateFaces.cpp \
+	./modules/ModelProcessorx/CreateEdges.cpp \
+	./modules/ModelProcessorx/CreateSingleNodeToElementConnectivity.cpp \
+	./modules/ModelProcessorx/CreateNumberNodeToElementConnectivity.cpp \
+	./modules/ModelProcessorx/CreateElementsVerticesAndMaterials.cpp \
+	./modules/ModelProcessorx/CreateNodes.cpp \
+	./modules/ParseToolkitsOptionsx/ParseToolkitsOptionsx.cpp \
+	./modules/NodesDofx/NodesDofx.cpp \
+	./modules/NodalValuex/NodalValuex.cpp \
+	./modules/VertexCoordinatesx/VertexCoordinatesx.cpp \
+	./modules/OutputResultsx/OutputResultsx.cpp \
+	./modules/InputDepthAverageAtBasex/InputDepthAverageAtBasex.cpp \
+	./modules/InputDuplicatex/InputDuplicatex.cpp \
+	./modules/InputExtrudex/InputExtrudex.cpp \
+	./modules/SurfaceAreax/SurfaceAreax.cpp \
+	./modules/AllocateSystemMatricesx/AllocateSystemMatricesx.cpp \
+	./modules/CreateJacobianMatrixx/CreateJacobianMatrixx.cpp \
+	./modules/SystemMatricesx/SystemMatricesx.cpp \
+	./modules/CreateNodalConstraintsx/CreateNodalConstraintsx.cpp \
+	./modules/UpdateDynamicConstraintsx/UpdateDynamicConstraintsx.cpp \
+	./modules/IoModelToConstraintsx/IoModelToConstraintsx.cpp \
+	./modules/SetActiveNodesLSMx/SetActiveNodesLSMx.cpp \
+	./modules/InputUpdateFromConstantx/InputUpdateFromConstantx.cpp \
+	./modules/InputUpdateFromSolutionx/InputUpdateFromSolutionx.cpp \
+	./modules/GeothermalFluxx/GeothermalFluxx.cpp \
+	./modules/GetSolutionFromInputsx/GetSolutionFromInputsx.cpp \
+	./modules/GetVectorFromInputsx/GetVectorFromInputsx.cpp \
+	./modules/InputUpdateFromVectorx/InputUpdateFromVectorx.cpp \
+	./modules/FloatingiceMeltingRatex/FloatingiceMeltingRatex.cpp \
+	./modules/FloatingiceMeltingRatePicox/FloatingiceMeltingRatePicox.cpp \
+	./modules/FrontalForcingsx/FrontalForcingsx.cpp \
+	./modules/ConfigureObjectsx/ConfigureObjectsx.cpp \
+	./modules/SpcNodesx/SpcNodesx.cpp \
+	./modules/SurfaceMassBalancex/SurfaceMassBalancex.cpp \
+	./modules/SurfaceMassBalancex/Gembx.cpp \
+	./modules/Reducevectorgtofx/Reducevectorgtofx.cpp \
+	./modules/Reduceloadx/Reduceloadx.cpp \
+	./modules/ConstraintsStatex/ConstraintsStatex.cpp \
+	./modules/ResetConstraintsx/ResetConstraintsx.cpp \
+	./modules/ResetFSBasalBoundaryConditionx/ResetFSBasalBoundaryConditionx.cpp \
+	./modules/Solverx/Solverx.cpp \
+	./modules/Mergesolutionfromftogx/Mergesolutionfromftogx.cpp \
+	./cores/ProcessArguments.cpp \
+	./cores/ResetBoundaryConditions.cpp \
+	./cores/WrapperCorePointerFromSolutionEnum.cpp \
+	./cores/CorePointerFromSolutionEnum.cpp \
+	./cores/ad_core.cpp \
+	./cores/adgradient_core.cpp \
+	./main/EnvironmentInit.cpp \
+	./main/EnvironmentFinalize.cpp \
+	./analyses/EnumToAnalysis.cpp \
+	./solutionsequences/solutionsequence_la.cpp \
+	./solutionsequences/solutionsequence_la_theta.cpp \
+	./solutionsequences/solutionsequence_linear.cpp \
+	./solutionsequences/solutionsequence_nonlinear.cpp \
+	./solutionsequences/solutionsequence_newton.cpp \
+	./solutionsequences/solutionsequence_fct.cpp \
+	./solutionsequences/solutionsequence_schurcg.cpp \
+	./solutionsequences/convergence.cpp \
+	./classes/Options/Options.cpp \
+	./classes/Options/OptionUtilities.cpp \
+	./classes/RiftStruct.cpp \
+	./modules/ModelProcessorx/Transient/UpdateElementsTransient.cpp \
+	./cores/transient_core.cpp \
+	./cores/steadystate_core.cpp \
+	./cores/masstransport_core.cpp \
+	./cores/depthaverage_core.cpp \
+	./cores/extrudefrombase_core.cpp \
+	./cores/extrudefromtop_core.cpp \
+	./cores/thermal_core.cpp \
+	./cores/smb_core.cpp \
+	./cores/bmb_core.cpp \
+	./solutionsequences/solutionsequence_thermal_nonlinear.cpp \
+	./modules/ControlInputSetGradientx/ControlInputSetGradientx.cpp \
+	./modules/GetVectorFromControlInputsx/GetVectorFromControlInputsx.cpp \
+	./modules/SetControlInputsFromVectorx/SetControlInputsFromVectorx.cpp \
+	./modules/ModelProcessorx/Control/CreateParametersControl.cpp \
+	./modules/ModelProcessorx/Control/UpdateElementsAndMaterialsControl.cpp \
+	./modules/SurfaceAbsVelMisfitx/SurfaceAbsVelMisfitx.cpp \
+	./modules/SurfaceRelVelMisfitx/SurfaceRelVelMisfitx.cpp \
+	./modules/SurfaceLogVelMisfitx/SurfaceLogVelMisfitx.cpp \
+	./modules/SurfaceLogVxVyMisfitx/SurfaceLogVxVyMisfitx.cpp \
+	./modules/SurfaceAverageVelMisfitx/SurfaceAverageVelMisfitx.cpp \
+	./modules/ThicknessAbsMisfitx/ThicknessAbsMisfitx.cpp \
+	./modules/Gradjx/Gradjx.cpp \
+	./modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp \
+	./modules/ThicknessAlongGradientx/ThicknessAlongGradientx.cpp \
+	./modules/ThicknessAcrossGradientx/ThicknessAcrossGradientx.cpp \
+	./modules/RheologyBbarAbsGradientx/RheologyBbarAbsGradientx.cpp \
+	./modules/RheologyBAbsGradientx/RheologyBAbsGradientx.cpp \
+	./shared/Numerics/BrentSearch.cpp \
+	./cores/control_core.cpp \
+	./cores/controltao_core.cpp \
+	./cores/controlm1qn3_core.cpp \
+	./cores/controladm1qn3_core.cpp \
+	./cores/controlvalidation_core.cpp \
+	./cores/adjointstressbalance_core.cpp \
+	./cores/adjointbalancethickness_core.cpp \
+	./cores/adjointbalancethickness2_core.cpp \
+	./cores/AdjointCorePointerFromSolutionEnum.cpp \
+	./solutionsequences/solutionsequence_adjoint_linear.cpp \
+	./cores/hydrology_core.cpp \
+	./solutionsequences/solutionsequence_hydro_nonlinear.cpp \
+	./solutionsequences/solutionsequence_shakti_nonlinear.cpp \
+	./solutionsequences/solutionsequence_glads_nonlinear.cpp \
+	./cores/stressbalance_core.cpp \
+	./solutionsequences/solutionsequence_stokescoupling_nonlinear.cpp \
+	./cores/balancethickness_core.cpp \
+	./cores/balancethickness2_core.cpp \
+	./cores/balancevelocity_core.cpp \
+	./cores/dummy_core.cpp \
+	./cores/surfaceslope_core.cpp \
+	./cores/bedslope_core.cpp \
+	./cores/damage_core.cpp \
+	./cores/levelsetfunctionslope_core.cpp \
+	./cores/movingfront_core.cpp \
+	./modules/GroundinglineMigrationx/GroundinglineMigrationx.cpp \
+	./classes/Loads/Riftfront.cpp \
+	./modules/ConstraintsStatex/RiftConstraintsState.cpp \
+	./modules/ModelProcessorx/CreateOutputDefinitions.cpp \
+	./modules/OutputDefinitionsResponsex/OutputDefinitionsResponsex.cpp \
+	./modules/InterpFromMeshToMesh2dx/InterpFromMeshToMesh2dx.cpp \
+	./classes/Inputs2/Inputs2.cpp \
+	./classes/Inputs2/BoolInput2.cpp \
+	./classes/Inputs2/IntInput2.cpp \
+	./classes/Inputs2/ElementInput2.cpp \
+	./classes/Inputs2/SegInput2.cpp \
+	./classes/Inputs2/TriaInput2.cpp \
+	./classes/Inputs2/PentaInput2.cpp \
+	./classes/Inputs2/DatasetInput2.cpp \
+	./classes/Inputs2/ControlInput2.cpp \
+	./classes/Inputs2/TransientInput2.cpp \
+	./classes/Inputs2/ArrayInput2.cpp
 #}}}
 #ADJOINTMPI/MeDiPack sources {{{
@@ -338,34 +340,36 @@
 #DAKOTA sources  {{{
 if DAKOTA
-issm_sources +=  ./classes/Dakota/IssmDirectApplicInterface.h\
-					  ./classes/Dakota/IssmParallelDirectApplicInterface.cpp\
-					  ./modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp\
-					  ./modules/InputUpdateFromVectorDakotax/InputUpdateFromVectorDakotax.cpp\
-					  ./modules/InputUpdateFromMatrixDakotax/InputUpdateFromMatrixDakotax.cpp\
-					  ./modules/AverageOntoPartitionx/AverageOntoPartitionx.cpp\
-					  ./modules/ModelProcessorx/Dakota/CreateParametersDakota.cpp\
-					  ./modules/ModelProcessorx/Dakota/UpdateElementsAndMaterialsDakota.cpp\
-					  ./cores/dakota_core.cpp
+issm_sources += \
+	./classes/Dakota/IssmDirectApplicInterface.h \
+	./classes/Dakota/IssmParallelDirectApplicInterface.cpp \
+	./modules/InputUpdateFromDakotax/InputUpdateFromDakotax.cpp \
+	./modules/InputUpdateFromVectorDakotax/InputUpdateFromVectorDakotax.cpp \
+	./modules/InputUpdateFromMatrixDakotax/InputUpdateFromMatrixDakotax.cpp \
+	./modules/AverageOntoPartitionx/AverageOntoPartitionx.cpp \
+	./modules/ModelProcessorx/Dakota/CreateParametersDakota.cpp \
+	./modules/ModelProcessorx/Dakota/UpdateElementsAndMaterialsDakota.cpp \
+	./cores/dakota_core.cpp
 endif
 #}}}
 #Petsc sources  {{{
 if PETSC
-issm_sources += ./toolkits/petsc\
-					./toolkits/petsc/patches\
-					./toolkits/petsc/patches/VecToMPISerial.cpp\
-					./toolkits/petsc/patches/MatToSerial.cpp\
-					./toolkits/petsc/patches/NewVec.cpp\
-					./toolkits/petsc/patches/PetscOptionsDetermineSolverType.cpp\
-					./toolkits/petsc/patches/NewMat.cpp\
-					./toolkits/petsc/patches/VecFree.cpp\
-					./toolkits/petsc/patches/KSPFree.cpp\
-					./toolkits/petsc/patches/MatFree.cpp\
-					./toolkits/petsc/patches/MatMultPatch.cpp\
-					./toolkits/petsc/patches/ISSMToPetscMatrixType.cpp\
-					./toolkits/petsc/patches/ISSMToPetscInsertMode.cpp\
-					./toolkits/petsc/patches/ISSMToPetscNormMode.cpp\
-					./toolkits/petsc/objects/PetscMat.cpp\
-					./toolkits/petsc/objects/PetscVec.cpp\
-					./toolkits/petsc/objects/PetscSolver.cpp
+issm_sources += \
+	./toolkits/petsc \
+	./toolkits/petsc/patches \
+	./toolkits/petsc/patches/VecToMPISerial.cpp \
+	./toolkits/petsc/patches/MatToSerial.cpp \
+	./toolkits/petsc/patches/NewVec.cpp \
+	./toolkits/petsc/patches/PetscOptionsDetermineSolverType.cpp \
+	./toolkits/petsc/patches/NewMat.cpp \
+	./toolkits/petsc/patches/VecFree.cpp \
+	./toolkits/petsc/patches/KSPFree.cpp \
+	./toolkits/petsc/patches/MatFree.cpp \
+	./toolkits/petsc/patches/MatMultPatch.cpp \
+	./toolkits/petsc/patches/ISSMToPetscMatrixType.cpp \
+	./toolkits/petsc/patches/ISSMToPetscInsertMode.cpp \
+	./toolkits/petsc/patches/ISSMToPetscNormMode.cpp \
+	./toolkits/petsc/objects/PetscMat.cpp \
+	./toolkits/petsc/objects/PetscVec.cpp \
+	./toolkits/petsc/objects/PetscSolver.cpp
 endif
 #}}}
@@ -493,49 +497,53 @@
 endif
 #}}}
-#Gia sources  (only if have fortran){{{
+#Gia sources (only if have fortran){{{
 if GIAIVINS
 if FORTRAN
-issm_sources +=  ./cores/gia_core.cpp\
-					./analyses/GiaIvinsAnalysis.cpp\
-					./modules/GiaDeflectionCorex/GiaDeflectionCorex.cpp\
-					./modules/GiaDeflectionCorex/distme.f\
-					./modules/GiaDeflectionCorex/freed.f\
-					./modules/GiaDeflectionCorex/ojrule.f\
-					./modules/GiaDeflectionCorex/pwise.f\
-					./modules/GiaDeflectionCorex/qwise.f\
-					./modules/GiaDeflectionCorex/stot.f\
-					./modules/GiaDeflectionCorex/what0.f
-endif
-endif
-#}}}
-#Love sources  (only if have fortran){{{
+issm_sources += \
+	./cores/gia_core.cpp \
+	./analyses/GiaIvinsAnalysis.cpp \
+	./modules/GiaDeflectionCorex/GiaDeflectionCorex.cpp \
+	./modules/GiaDeflectionCorex/distme.f \
+	./modules/GiaDeflectionCorex/freed.f \
+	./modules/GiaDeflectionCorex/ojrule.f \
+	./modules/GiaDeflectionCorex/pwise.f \
+	./modules/GiaDeflectionCorex/qwise.f \
+	./modules/GiaDeflectionCorex/stot.f \
+	./modules/GiaDeflectionCorex/what0.f
+endif
+endif
+#}}}
+#Love sources (only if have fortran){{{
 if LOVE
 if FORTRAN
-issm_sources +=  ./cores/love_core.cpp\
-				 ./analyses/LoveAnalysis.cpp\
-				./modules/FourierLoveCorex/FourierLoveCorex.cpp\
-				./modules/FourierLoveCorex/lnb_param.f90\
-				./modules/FourierLoveCorex/model.f90\
-				./modules/FourierLoveCorex/util.f90\
-				./modules/FourierLoveCorex/lovenb_sub.f90\
-				./modules/FourierLoveCorex/love_numbers.f90
+issm_sources += \
+	./cores/love_core.cpp \
+	./analyses/LoveAnalysis.cpp \
+	./modules/FourierLoveCorex/FourierLoveCorex.cpp \
+	./modules/FourierLoveCorex/lnb_param.f90 \
+	./modules/FourierLoveCorex/model.f90 \
+	./modules/FourierLoveCorex/util.f90 \
+	./modules/FourierLoveCorex/lovenb_sub.f90 \
+	./modules/FourierLoveCorex/love_numbers.f90
 endif
 endif
 #}}}
 #Esa sources  {{{
-if ESA 
-issm_sources +=  ./cores/esa_core.cpp\
-					./analyses/EsaAnalysis.cpp
+if ESA
+issm_sources += \
+	./cores/esa_core.cpp \
+	./analyses/EsaAnalysis.cpp
 endif
 #}}}
 #Oceansources  {{{
 if OCEAN
-issm_sources +=  ./modules/OceanExchangeDatax/OceanExchangeDatax.cpp
+issm_sources += ./modules/OceanExchangeDatax/OceanExchangeDatax.cpp
 endif
 #}}}
 #Slr sources  {{{
 if SEALEVELRISE
-issm_sources +=  ./cores/sealevelrise_core.cpp\
-				 ./analyses/SealevelriseAnalysis.cpp
+issm_sources += \
+	./cores/sealevelrise_core.cpp \
+	./analyses/SealevelriseAnalysis.cpp
 endif
 #}}}
@@ -559,37 +567,38 @@
 #Wrapper sources
 #Kml sources  {{{
-kml_sources = ./modules/Exp2Kmlx/Exp2Kmlx.cpp\
-				  ./modules/Kml2Expx/Kml2Expx.cpp\
-				  ./modules/Shp2Kmlx/Shp2Kmlx.cpp\
-				  ./modules/KMLFileReadx/KMLFileReadx.cpp\
-				  ./modules/KMLMeshWritex/KMLMeshWritex.cpp\
-				  ./modules/KMLOverlayx/KMLOverlayx.cpp\
-				  ./kml/KML_Attribute.cpp\
-				  ./kml/KML_Comment.cpp\
-				  ./kml/KML_ColorStyle.cpp\
-				  ./kml/KML_Container.cpp\
-				  ./kml/KML_Document.cpp\
-				  ./kml/KML_Feature.cpp\
-				  ./kml/KML_File.cpp\
-				  ./kml/KML_Folder.cpp\
-				  ./kml/KML_Geometry.cpp\
-				  ./kml/KML_GroundOverlay.cpp\
-				  ./kml/KML_Icon.cpp\
-				  ./kml/KML_LatLonBox.cpp\
-				  ./kml/KML_LinearRing.cpp\
-				  ./kml/KML_LineString.cpp\
-				  ./kml/KML_LineStyle.cpp\
-				  ./kml/KML_MultiGeometry.cpp\
-				  ./kml/KML_Object.cpp\
-				  ./kml/KML_Overlay.cpp\
-				  ./kml/KML_Point.cpp\
-				  ./kml/KML_Placemark.cpp\
-				  ./kml/KML_Polygon.cpp\
-				  ./kml/KML_PolyStyle.cpp\
-				  ./kml/KML_Style.cpp\
-				  ./kml/KML_StyleSelector.cpp\
-				  ./kml/KML_SubStyle.cpp\
-				  ./kml/KML_Unknown.cpp\
-				  ./kml/KMLFileReadUtils.cpp
+kml_sources = \
+	./modules/Exp2Kmlx/Exp2Kmlx.cpp \
+	./modules/Kml2Expx/Kml2Expx.cpp \
+	./modules/Shp2Kmlx/Shp2Kmlx.cpp \
+	./modules/KMLFileReadx/KMLFileReadx.cpp \
+	./modules/KMLMeshWritex/KMLMeshWritex.cpp \
+	./modules/KMLOverlayx/KMLOverlayx.cpp \
+	./kml/KML_Attribute.cpp \
+	./kml/KML_Comment.cpp \
+	./kml/KML_ColorStyle.cpp \
+	./kml/KML_Container.cpp \
+	./kml/KML_Document.cpp \
+	./kml/KML_Feature.cpp \
+	./kml/KML_File.cpp \
+	./kml/KML_Folder.cpp \
+	./kml/KML_Geometry.cpp \
+	./kml/KML_GroundOverlay.cpp \
+	./kml/KML_Icon.cpp \
+	./kml/KML_LatLonBox.cpp \
+	./kml/KML_LinearRing.cpp \
+	./kml/KML_LineString.cpp \
+	./kml/KML_LineStyle.cpp \
+	./kml/KML_MultiGeometry.cpp \
+	./kml/KML_Object.cpp \
+	./kml/KML_Overlay.cpp \
+	./kml/KML_Point.cpp \
+	./kml/KML_Placemark.cpp \
+	./kml/KML_Polygon.cpp \
+	./kml/KML_PolyStyle.cpp \
+	./kml/KML_Style.cpp \
+	./kml/KML_StyleSelector.cpp \
+	./kml/KML_SubStyle.cpp \
+	./kml/KML_Unknown.cpp \
+	./kml/KMLFileReadUtils.cpp
 #}}}
 #NEOPZ sources  {{{
@@ -597,40 +606,43 @@
 #}}}
 #Modules sources{{{
-modules_sources= ./shared/Threads/LaunchThread.cpp\
-			./shared/Threads/PartitionRange.cpp\
-			./shared/Exp/exp.cpp\
-			./shared/Triangle/AssociateSegmentToElement.cpp\
-			./shared/Triangle/GridInsideHole.cpp\
-			./shared/Triangle/OrderSegments.cpp\
-			./shared/Triangle/SplitMeshForRifts.cpp\
-			./shared/Triangle/TriangleUtils.cpp\
-			./modules/Trianglex/Trianglex.cpp\
-			./modules/ProcessRiftsx/ProcessRiftsx.cpp\
-			./modules/PointCloudFindNeighborsx/PointCloudFindNeighborsx.cpp\
-			./modules/PointCloudFindNeighborsx/PointCloudFindNeighborsxt.cpp\
-			./modules/InterpFromGridToMeshx/InterpFromGridToMeshx.cpp\
-			./modules/InterpFromMesh2dx/InterpFromMesh2dx.cpp\
-			./modules/InterpFromMesh2dx/InterpFromMesh2dxt.cpp\
-			./modules/InterpFromMeshToMesh3dx/InterpFromMeshToMesh3dx.cpp\
-			./modules/InterpFromMeshToGridx/InterpFromMeshToGridx.cpp\
-			./modules/MeshProfileIntersectionx/MeshProfileIntersectionx.cpp\
-			./modules/ContourToMeshx/ContourToMeshx.cpp\
-			./modules/ContourToMeshx/ContourToMeshxt.cpp\
-			./modules/ExpToLevelSetx/ExpToLevelSetx.cpp\
-			./modules/ExpToLevelSetx/ExpToLevelSetxt.cpp\
-			./modules/ContourToNodesx/ContourToNodesx.cpp\
-			./modules/DistanceToMaskBoundaryx/DistanceToMaskBoundaryx.cpp\
-			./modules/DistanceToMaskBoundaryx/DistanceToMaskBoundaryxt.cpp\
-			./modules/NodeConnectivityx/NodeConnectivityx.cpp\
-			./modules/ElementConnectivityx/ElementConnectivityx.cpp\
-			./modules/PropagateFlagsFromConnectivityx/PropagateFlagsFromConnectivityx.cpp
+modules_sources = \
+	./shared/Threads/LaunchThread.cpp \
+	./shared/Threads/PartitionRange.cpp \
+	./shared/Exp/exp.cpp \
+	./shared/Triangle/AssociateSegmentToElement.cpp \
+	./shared/Triangle/GridInsideHole.cpp \
+	./shared/Triangle/OrderSegments.cpp \
+	./shared/Triangle/SplitMeshForRifts.cpp \
+	./shared/Triangle/TriangleUtils.cpp \
+	./modules/Trianglex/Trianglex.cpp \
+	./modules/ProcessRiftsx/ProcessRiftsx.cpp \
+	./modules/PointCloudFindNeighborsx/PointCloudFindNeighborsx.cpp \
+	./modules/PointCloudFindNeighborsx/PointCloudFindNeighborsxt.cpp \
+	./modules/InterpFromGridToMeshx/InterpFromGridToMeshx.cpp \
+	./modules/InterpFromMesh2dx/InterpFromMesh2dx.cpp \
+	./modules/InterpFromMesh2dx/InterpFromMesh2dxt.cpp \
+	./modules/InterpFromMeshToMesh3dx/InterpFromMeshToMesh3dx.cpp \
+	./modules/InterpFromMeshToGridx/InterpFromMeshToGridx.cpp \
+	./modules/MeshProfileIntersectionx/MeshProfileIntersectionx.cpp \
+	./modules/ContourToMeshx/ContourToMeshx.cpp \
+	./modules/ContourToMeshx/ContourToMeshxt.cpp \
+	./modules/ExpToLevelSetx/ExpToLevelSetx.cpp \
+	./modules/ExpToLevelSetx/ExpToLevelSetxt.cpp \
+	./modules/ContourToNodesx/ContourToNodesx.cpp \
+	./modules/DistanceToMaskBoundaryx/DistanceToMaskBoundaryx.cpp \
+	./modules/DistanceToMaskBoundaryx/DistanceToMaskBoundaryxt.cpp \
+	./modules/NodeConnectivityx/NodeConnectivityx.cpp \
+	./modules/ElementConnectivityx/ElementConnectivityx.cpp \
+	./modules/PropagateFlagsFromConnectivityx/PropagateFlagsFromConnectivityx.cpp
+
 if CHACO
-modules_sources+= ./modules/Chacox/Chacox.cpp\
-						./modules/Chacox/input_parse.cpp\
-						./modules/Chacox/chaco_seconds.cpp\
-						./modules/Chacox/user_params.cpp
+modules_sources += \
+	./modules/Chacox/Chacox.cpp \
+	./modules/Chacox/input_parse.cpp \
+	./modules/Chacox/chaco_seconds.cpp \
+	./modules/Chacox/user_params.cpp
 endif
 if SCOTCH
-modules_sources+= ./modules/Scotchx/Scotchx.cpp
+modules_sources += ./modules/Scotchx/Scotchx.cpp
 endif
 #}}}
@@ -639,20 +651,22 @@
 #Kriging sources  {{{
 if KRIGING
-issm_sources += ./classes/kriging/Observations.cpp\
-					./classes/kriging/GaussianVariogram.cpp\
-					./classes/kriging/ExponentialVariogram.cpp\
-					./classes/kriging/SphericalVariogram.cpp\
-					./classes/kriging/PowerVariogram.cpp\
-					./classes/kriging/Quadtree.cpp\
-					./classes/kriging/Covertree.cpp\
-					./classes/kriging/Observation.cpp\
-					./modules/Krigingx/pKrigingx.cpp
-
-modules_sources +=./modules/Krigingx/Krigingx.cpp\
-						./modules/Krigingx/pKrigingx.cpp
+issm_sources += \
+	./classes/kriging/Observations.cpp \
+	./classes/kriging/GaussianVariogram.cpp \
+	./classes/kriging/ExponentialVariogram.cpp \
+	./classes/kriging/SphericalVariogram.cpp \
+	./classes/kriging/PowerVariogram.cpp \
+	./classes/kriging/Quadtree.cpp \
+	./classes/kriging/Covertree.cpp \
+	./classes/kriging/Observation.cpp \
+	./modules/Krigingx/pKrigingx.cpp
+
+modules_sources += \
+	./modules/Krigingx/Krigingx.cpp \
+	./modules/Krigingx/pKrigingx.cpp
 endif
 #}}}
 #Library flags and sources {{{
-ALLCXXFLAGS= $(CXXFLAGS) $(CXXOPTFLAGS) 
+ALLCXXFLAGS= $(CXXFLAGS) $(CXXOPTFLAGS)
 
 libISSMCore_la_SOURCES  = $(issm_sources)
@@ -662,5 +676,5 @@
 if !WINDOWS
 if !STANDALONE_LIBRARIES
-libISSMCore_la_LIBADD = $(PETSCLIB) $(TAOLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MUMPSLIB) $(SUPERLULIB) $(SPOOLESLIB) $(SCALAPACKLIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(DAKOTALIB) $(METISLIB) $(CHACOLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(OSLIBS) $(GSLLIB)   $(ADOLCLIB) $(AMPILIB) $(ADJOINTMPILIB) $(METEOIOLIB) $(SNOWPACKLIB)
+libISSMCore_la_LIBADD = $(PETSCLIB) $(TAOLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MUMPSLIB) $(SUPERLULIB) $(SPOOLESLIB) $(SCALAPACKLIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(DAKOTALIB) $(METISLIB) $(CHACOLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(OSLIBS) $(GSLLIB) $(ADOLCLIB) $(AMPILIB) $(ADJOINTMPILIB) $(METEOIOLIB) $(SNOWPACKLIB)
 if FORTRAN
 libISSMCore_la_LIBADD += $(FLIBS) $(FORTRANLIB)
@@ -693,12 +707,24 @@
 endif
 
-if VERSION
+
 AM_LDFLAGS =
-else
-AM_LDFLAGS = -avoid-version
-endif
-
+
+if !VERSION
+AM_LDFLAGS += -avoid-version
+endif
+
+# NOTE:
+# - On Linux, we probably do not need the -static flag as long as we only
+#	generate static libraries for external packages. Dynamic system libraries
+#	will be linked to, whether we like it or not, if no static version is
+#	available.
+# - On macOS, static linking of binaries is not supported.
+#
 if STANDALONE_EXECUTABLES
-issm_LDFLAGS = -static
+if MAC
+AM_LDFLAGS += -Wl,-rpath,'@loader_path/../lib'
+else
+AM_LDFLAGS += -static -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN/../lib'
+endif
 endif
 
@@ -709,8 +735,8 @@
 libISSMModules_la_LDFLAGS = -static
 else
-libISSMModules_la_LDFLAGS = 
-endif
-else
-libISSMCore_la_LDFLAGS = 
+libISSMModules_la_LDFLAGS =
+endif
+else
+libISSMCore_la_LDFLAGS =
 libISSMOverload_la_LDFLAGS =
 endif
@@ -725,7 +751,7 @@
 if ANDROID
 if ANDROIDEXE
-bin_PROGRAMS = issm  issm_slr
-else
-bin_PROGRAMS = 
+bin_PROGRAMS = issm issm_slr
+else
+bin_PROGRAMS =
 endif
 else
@@ -733,5 +759,5 @@
 bin_PROGRAMS =
 else
-bin_PROGRAMS = issm  issm_slr
+bin_PROGRAMS = issm issm_slr
 endif
 endif
@@ -744,5 +770,5 @@
 
 #External packages
-LDADD +=  $(NEOPZLIB) $(TAOLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MUMPSLIB) $(SUPERLULIB) $(SPOOLESLIB) $(SCALAPACKLIB) $(BLACSLIB) $(PETSCLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(DAKOTALIB) $(METISLIB) $(CHACOLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MKLLIB) $(MPILIB)  $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(OSLIBS) $(GSLLIB) $(AMPILIB) $(ADJOINTMPILIB) $(ADOLCLIB) $(MPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(PROJ4LIB)
+LDADD += $(NEOPZLIB) $(TAOLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MUMPSLIB) $(SUPERLULIB) $(SPOOLESLIB) $(SCALAPACKLIB) $(BLACSLIB) $(PETSCLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(DAKOTALIB) $(METISLIB) $(CHACOLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(OSLIBS) $(GSLLIB) $(AMPILIB) $(ADJOINTMPILIB) $(ADOLCLIB) $(MPILIB) $(METEOIOLIB) $(SNOWPACKLIB) $(PROJ4LIB)
 
 if FORTRAN
@@ -765,5 +791,5 @@
 bin_PROGRAMS += kriging
 kriging_SOURCES = main/kriging.cpp
-kriging_CXXFLAGS= $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS) 
+kriging_CXXFLAGS= $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS)
 endif
 
@@ -771,5 +797,5 @@
 bin_PROGRAMS += issm_dakota
 issm_dakota_SOURCES = main/issm_dakota.cpp
-issm_dakota_CXXFLAGS= $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS) 
+issm_dakota_CXXFLAGS= $(CXXFLAGS) $(CXXOPTFLAGS) $(COPTFLAGS)
 issm_dakota_LDADD= $(LDADD)
 endif
Index: /issm/trunk/src/c/analyses/AdjointBalancethickness2Analysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/AdjointBalancethickness2Analysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/AdjointBalancethickness2Analysis.cpp	(revision 24686)
@@ -4,4 +4,5 @@
 #include "../shared/shared.h"
 #include "../modules/modules.h"
+#include "../classes/Inputs2/DatasetInput2.h"
 
 /*Model processor*/
@@ -18,5 +19,5 @@
 	return 1;
 }/*}}}*/
-void AdjointBalancethickness2Analysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void AdjointBalancethickness2Analysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 	_error_("not implemented yet");
 }/*}}}*/
@@ -65,9 +66,9 @@
 	element->FindParam(&num_responses,InversionNumCostFunctionsEnum);
 	element->FindParam(&responses,NULL,InversionCostFunctionsEnum);
-	Input* surface_input      = element->GetInput(SurfaceEnum);                          _assert_(surface_input);
-	Input* surfaceobs_input   = element->GetInput(InversionSurfaceObsEnum);              _assert_(surfaceobs_input);
-	Input* weights_input      = element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input           = element->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vy_input           = element->GetInput(VyEnum);                                 _assert_(vy_input);
+	Input2* surface_input      = element->GetInput2(SurfaceEnum);                          _assert_(surface_input);
+	Input2* surfaceobs_input   = element->GetInput2(InversionSurfaceObsEnum);              _assert_(surfaceobs_input);
+	DatasetInput2* weights_input      = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input           = element->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vy_input           = element->GetInput2(VyEnum);                                 _assert_(vy_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -171,5 +172,5 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* adjoint_input = element->GetInput(AdjointEnum);            _assert_(adjoint_input);
+	Input2* adjoint_input = element->GetInput2(AdjointEnum);            _assert_(adjoint_input);
 
 	Gauss* gauss=element->NewGauss(2);
@@ -215,10 +216,10 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* adjoint_input = element->GetInput(AdjointEnum);            _assert_(adjoint_input);
-	Input* omega_input   = element->GetInput(BalancethicknessOmegaEnum); _assert_(omega_input);
-	Input* surface_input = element->GetInput(SurfaceEnum); _assert_(surface_input); 
-	Input* surfaceslopex_input = element->GetInput(SurfaceSlopeXEnum); _assert_(surfaceslopex_input); 
-	Input* surfaceslopey_input = element->GetInput(SurfaceSlopeYEnum); _assert_(surfaceslopey_input); 
-	Input* velobs_input        = element->GetInput(InversionVelObsEnum); _assert_(velobs_input); 
+	Input2* adjoint_input = element->GetInput2(AdjointEnum);            _assert_(adjoint_input);
+	Input2* omega_input   = element->GetInput2(BalancethicknessOmegaEnum); _assert_(omega_input);
+	Input2* surface_input = element->GetInput2(SurfaceEnum); _assert_(surface_input); 
+	Input2* surfaceslopex_input = element->GetInput2(SurfaceSlopeXEnum); _assert_(surfaceslopex_input); 
+	Input2* surfaceslopey_input = element->GetInput2(SurfaceSlopeYEnum); _assert_(surfaceslopey_input); 
+	Input2* velobs_input        = element->GetInput2(InversionVelObsEnum); _assert_(velobs_input); 
 
 	Gauss* gauss=element->NewGauss(2);
@@ -272,6 +273,6 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* omega_input = element->GetInput(BalancethicknessOmegaEnum); _assert_(omega_input);
-	Input* weights_input         = element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* omega_input = element->GetInput2(BalancethicknessOmegaEnum); _assert_(omega_input);
+	DatasetInput2* weights_input         = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -321,7 +322,7 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* omega_input = element->GetInput(BalancethicknessOmegaEnum);   _assert_(omega_input);
-	Input* omega0_input = element->GetInput(BalancethicknessOmega0Enum); _assert_(omega0_input);
-	Input* weights_input = element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* omega_input = element->GetInput2(BalancethicknessOmegaEnum);   _assert_(omega_input);
+	Input2* omega0_input = element->GetInput2(BalancethicknessOmega0Enum); _assert_(omega0_input);
+	DatasetInput2* weights_input = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/analyses/AdjointBalancethickness2Analysis.h
===================================================================
--- /issm/trunk/src/c/analyses/AdjointBalancethickness2Analysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/AdjointBalancethickness2Analysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/AdjointBalancethicknessAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/AdjointBalancethicknessAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/AdjointBalancethicknessAnalysis.cpp	(revision 24686)
@@ -4,4 +4,5 @@
 #include "../shared/shared.h"
 #include "../modules/modules.h"
+#include "../classes/Inputs2/DatasetInput2.h"
 
 /*Model processor*/
@@ -18,5 +19,5 @@
 	return 1;
 }/*}}}*/
-void AdjointBalancethicknessAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void AdjointBalancethicknessAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 	_error_("not implemented yet");
 }/*}}}*/
@@ -85,9 +86,9 @@
 	basalelement->FindParam(&num_responses,InversionNumCostFunctionsEnum);
 	basalelement->FindParam(&responses,NULL,InversionCostFunctionsEnum);
-	Input* thickness_input    = basalelement->GetInput(ThicknessEnum);                          _assert_(thickness_input);
-	Input* thicknessobs_input = basalelement->GetInput(InversionThicknessObsEnum);              _assert_(thicknessobs_input);
-	Input* weights_input      = basalelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input           = basalelement->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vy_input           = basalelement->GetInput(VyEnum);                                 _assert_(vy_input);
+	Input2* thickness_input    = basalelement->GetInput2(ThicknessEnum);                          _assert_(thickness_input);
+	Input2* thicknessobs_input = basalelement->GetInput2(InversionThicknessObsEnum);              _assert_(thicknessobs_input);
+	DatasetInput2* weights_input      = basalelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input           = basalelement->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vy_input           = basalelement->GetInput2(VyEnum);                                 _assert_(vy_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -282,6 +283,6 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* thickness_input = element->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* adjoint_input   = element->GetInput(AdjointEnum);   _assert_(adjoint_input);
+	Input2* thickness_input = element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* adjoint_input   = element->GetInput2(AdjointEnum);   _assert_(adjoint_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -329,6 +330,6 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* thickness_input = element->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* adjoint_input   = element->GetInput(AdjointEnum);   _assert_(adjoint_input);
+	Input2* thickness_input = element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* adjoint_input   = element->GetInput2(AdjointEnum);   _assert_(adjoint_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/analyses/AdjointBalancethicknessAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/AdjointBalancethicknessAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/AdjointBalancethicknessAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/AdjointHorizAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/AdjointHorizAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/AdjointHorizAnalysis.cpp	(revision 24686)
@@ -4,4 +4,5 @@
 #include "../shared/shared.h"
 #include "../modules/modules.h"
+#include "../classes/Inputs2/DatasetInput2.h"
 
 /*Model processing*/
@@ -18,5 +19,5 @@
 	_error_("not implemented");
 }/*}}}*/
-void AdjointHorizAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void AdjointHorizAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 	   _error_("not implemented yet");
 }/*}}}*/
@@ -86,9 +87,9 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input = element->GetInput(VxEnum);_assert_(vx_input);
-	Input* vy_input = element->GetInput(VyEnum);_assert_(vy_input);
-	Input* vz_input = NULL;
+	Input2* vx_input = element->GetInput2(VxEnum);_assert_(vx_input);
+	Input2* vy_input = element->GetInput2(VyEnum);_assert_(vy_input);
+	Input2* vz_input = NULL;
 	if(dim==3){
-		vz_input = element->GetInput(VzEnum);
+		vz_input = element->GetInput2(VzEnum);
 	}
 	else{
@@ -170,6 +171,6 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input = element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input = element->GetInput(VyEnum); _assert_(vy_input);
+	Input2* vx_input = element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = element->GetInput2(VyEnum); _assert_(vy_input);
 
 	/*Allocate dbasis*/
@@ -275,7 +276,7 @@
 	/*Retrieve all inputs and parameters*/
 	basalelement->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input        = basalelement->GetInput(VxEnum);       _assert_(vx_input);
-	Input* vy_input        = basalelement->GetInput(VyEnum);       _assert_(vy_input);
-	Input* thickness_input = basalelement->GetInput(ThicknessEnum); _assert_(thickness_input);
+	Input2* vx_input        = basalelement->GetInput2(VxEnum);       _assert_(vx_input);
+	Input2* vy_input        = basalelement->GetInput2(VyEnum);       _assert_(vy_input);
+	Input2* thickness_input = basalelement->GetInput2(ThicknessEnum); _assert_(thickness_input);
 
 	/*Allocate dbasis*/
@@ -374,12 +375,12 @@
 	element->FindParam(&num_responses,InversionNumCostFunctionsEnum);
 	element->FindParam(&responses,NULL,InversionCostFunctionsEnum);
-	Input* weights_input = element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input      = element->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vxobs_input   = element->GetInput(InversionVxObsEnum);                     _assert_(vxobs_input);
-	Input* vy_input    = NULL;
-	Input* vyobs_input = NULL;
+	DatasetInput2* weights_input = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input      = element->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vxobs_input   = element->GetInput2(InversionVxObsEnum);                     _assert_(vxobs_input);
+	Input2* vy_input    = NULL;
+	Input2* vyobs_input = NULL;
 	if(domaintype!=Domain2DverticalEnum){
-		vy_input      = element->GetInput(VyEnum);                                 _assert_(vy_input);
-		vyobs_input   = element->GetInput(InversionVyObsEnum);                     _assert_(vyobs_input);
+		vy_input      = element->GetInput2(VyEnum);                                 _assert_(vy_input);
+		vyobs_input   = element->GetInput2(InversionVyObsEnum);                     _assert_(vyobs_input);
 	}
 	IssmDouble epsvel  = 2.220446049250313e-16;
@@ -611,12 +612,12 @@
 	element->FindParam(&num_responses,InversionNumCostFunctionsEnum);
 	element->FindParam(&responses,NULL,InversionCostFunctionsEnum);
-	Input* weights_input = element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input      = element->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vxobs_input   = element->GetInput(InversionVxObsEnum);                     _assert_(vxobs_input);
-	Input* vy_input=NULL;
-	Input* vyobs_input=NULL;
+	DatasetInput2* weights_input = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input      = element->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vxobs_input   = element->GetInput2(InversionVxObsEnum);                     _assert_(vxobs_input);
+	Input2* vy_input=NULL;
+	Input2* vyobs_input=NULL;
 	if(domaintype!=Domain2DverticalEnum){
-		vy_input      = element->GetInput(VyEnum);                                 _assert_(vy_input);
-		vyobs_input   = element->GetInput(InversionVyObsEnum);                     _assert_(vyobs_input);
+		vy_input      = element->GetInput2(VyEnum);                                 _assert_(vy_input);
+		vyobs_input   = element->GetInput2(InversionVyObsEnum);                     _assert_(vyobs_input);
 	}
 	IssmDouble epsvel  = 2.220446049250313e-16;
@@ -862,12 +863,12 @@
 	basalelement->FindParam(&num_responses,InversionNumCostFunctionsEnum);
 	basalelement->FindParam(&responses,NULL,InversionCostFunctionsEnum);
-	Input* weights_input = basalelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input      = basalelement->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vxobs_input   = basalelement->GetInput(InversionVxObsEnum);                     _assert_(vxobs_input);
-	Input* vy_input=NULL;
-	Input* vyobs_input=NULL;
+	DatasetInput2* weights_input = basalelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input      = basalelement->GetInput2(VxEnum);                                               _assert_(vx_input);
+	Input2* vxobs_input   = basalelement->GetInput2(InversionVxObsEnum);                                   _assert_(vxobs_input);
+	Input2* vy_input=NULL;
+	Input2* vyobs_input=NULL;
 	if(domaintype!=Domain2DverticalEnum){
-		vy_input      = basalelement->GetInput(VyEnum);                                 _assert_(vy_input);
-		vyobs_input   = basalelement->GetInput(InversionVyObsEnum);                     _assert_(vyobs_input);
+		vy_input      = basalelement->GetInput2(VyEnum);              _assert_(vy_input);
+		vyobs_input   = basalelement->GetInput2(InversionVyObsEnum);  _assert_(vyobs_input);
 	}
 	IssmDouble epsvel  = 2.220446049250313e-16;
@@ -1214,6 +1215,6 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* rheologyb_input = basalelement->GetInput(MaterialsRheologyBbarEnum);              _assert_(rheologyb_input);
-	Input* weights_input   = basalelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* rheologyb_input = basalelement->GetInput2(MaterialsRheologyBbarEnum);              _assert_(rheologyb_input);
+	DatasetInput2* weights_input   = basalelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1303,10 +1304,10 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* thickness_input = basalelement->GetInput(ThicknessEnum);             _assert_(thickness_input);
-	Input* vx_input        = basalelement->GetInput(VxEnum);                    _assert_(vx_input);
-	Input* vy_input        = basalelement->GetInput(VyEnum);                    _assert_(vy_input);
-	Input* adjointx_input  = basalelement->GetInput(AdjointxEnum);              _assert_(adjointx_input);
-	Input* adjointy_input  = basalelement->GetInput(AdjointyEnum);              _assert_(adjointy_input);
-	Input* rheologyb_input = basalelement->GetInput(MaterialsRheologyBbarEnum); _assert_(rheologyb_input);
+	Input2* thickness_input = basalelement->GetInput2(ThicknessEnum);             _assert_(thickness_input);
+	Input2* vx_input        = basalelement->GetInput2(VxEnum);                    _assert_(vx_input);
+	Input2* vy_input        = basalelement->GetInput2(VyEnum);                    _assert_(vy_input);
+	Input2* adjointx_input  = basalelement->GetInput2(AdjointxEnum);              _assert_(adjointx_input);
+	Input2* adjointy_input  = basalelement->GetInput2(AdjointyEnum);              _assert_(adjointy_input);
+	Input2* rheologyb_input = basalelement->GetInput2(MaterialsRheologyBbarEnum); _assert_(rheologyb_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1381,6 +1382,6 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* rheology_input = element->GetInput(MaterialsRheologyBEnum);              _assert_(rheology_input);
-	Input* weights_input   = element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* rheology_input = element->GetInput2(MaterialsRheologyBEnum);              _assert_(rheology_input);
+	DatasetInput2* weights_input   = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
 	/* Start  looping on the number of gaussian points: */
 	Gauss* gauss=element->NewGauss(2);
@@ -1447,7 +1448,7 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* rheology_input  = element->GetInput(MaterialsRheologyBbarEnum);              _assert_(rheology_input);
-	Input* rheology0_input = element->GetInput(RheologyBInitialguessEnum);              _assert_(rheology0_input);
-	Input* weights_input   = element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* rheology_input  = element->GetInput2(MaterialsRheologyBbarEnum);              _assert_(rheology_input);
+	Input2* rheology0_input = element->GetInput2(RheologyBInitialguessEnum);              _assert_(rheology0_input);
+	DatasetInput2* weights_input   = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1502,13 +1503,13 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* thickness_input = element->GetInput(ThicknessEnum);             _assert_(thickness_input);
-	Input* vx_input        = element->GetInput(VxEnum);                    _assert_(vx_input);
-	Input* vy_input        = NULL;
-	Input* adjointx_input  = element->GetInput(AdjointxEnum);              _assert_(adjointx_input);
-	Input* adjointy_input  = NULL;
-	Input* rheologyb_input = element->GetInput(MaterialsRheologyBEnum); _assert_(rheologyb_input);
+	Input2* thickness_input = element->GetInput2(ThicknessEnum);             _assert_(thickness_input);
+	Input2* vx_input        = element->GetInput2(VxEnum);                    _assert_(vx_input);
+	Input2* vy_input        = NULL;
+	Input2* adjointx_input  = element->GetInput2(AdjointxEnum);              _assert_(adjointx_input);
+	Input2* adjointy_input  = NULL;
+	Input2* rheologyb_input = element->GetInput2(MaterialsRheologyBEnum); _assert_(rheologyb_input);
 	if(domaintype!=Domain2DverticalEnum){
-		vy_input        = element->GetInput(VyEnum);                   _assert_(vy_input);
-		adjointy_input  = element->GetInput(AdjointyEnum);             _assert_(adjointy_input);
+		vy_input        = element->GetInput2(VyEnum);                   _assert_(vy_input);
+		adjointy_input  = element->GetInput2(AdjointyEnum);             _assert_(adjointy_input);
 	}
 	/* Start  looping on the number of gaussian points: */
@@ -1597,10 +1598,10 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* thickness_input = basalelement->GetInput(ThicknessEnum);             _assert_(thickness_input);
-	Input* vx_input        = basalelement->GetInput(VxEnum);                    _assert_(vx_input);
-	Input* vy_input        = basalelement->GetInput(VyEnum);                    _assert_(vy_input);
-	Input* adjointx_input  = basalelement->GetInput(AdjointxEnum);              _assert_(adjointx_input);
-	Input* adjointy_input  = basalelement->GetInput(AdjointyEnum);              _assert_(adjointy_input);
-	Input* rheologyb_input = basalelement->GetInput(MaterialsRheologyBEnum); _assert_(rheologyb_input);
+	Input2* thickness_input = basalelement->GetInput2(ThicknessEnum);             _assert_(thickness_input);
+	Input2* vx_input        = basalelement->GetInput2(VxEnum);                    _assert_(vx_input);
+	Input2* vy_input        = basalelement->GetInput2(VyEnum);                    _assert_(vy_input);
+	Input2* adjointx_input  = basalelement->GetInput2(AdjointxEnum);              _assert_(adjointx_input);
+	Input2* adjointy_input  = basalelement->GetInput2(AdjointyEnum);              _assert_(adjointy_input);
+	Input2* rheologyb_input = basalelement->GetInput2(MaterialsRheologyBEnum); _assert_(rheologyb_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1683,6 +1684,6 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* dragcoefficient_input = basalelement->GetInput(FrictionCoefficientEnum);                _assert_(dragcoefficient_input);
-	Input* weights_input         = basalelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* dragcoefficient_input = basalelement->GetInput2(FrictionCoefficientEnum);                _assert_(dragcoefficient_input);
+	DatasetInput2* weights_input         = basalelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1752,15 +1753,15 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* vx_input        = element->GetInput(VxEnum);                   _assert_(vx_input);
-	Input* vy_input        = element->GetInput(VyEnum);                   _assert_(vy_input);
-	Input* adjointx_input  = element->GetInput(AdjointxEnum);             _assert_(adjointx_input);
-	Input* adjointy_input  = element->GetInput(AdjointyEnum);             _assert_(adjointy_input);
-	Input* vz_input        = NULL;
-	Input* adjointz_input  = NULL;
+	Input2* vx_input        = element->GetInput2(VxEnum);                   _assert_(vx_input);
+	Input2* vy_input        = element->GetInput2(VyEnum);                   _assert_(vy_input);
+	Input2* adjointx_input  = element->GetInput2(AdjointxEnum);             _assert_(adjointx_input);
+	Input2* adjointy_input  = element->GetInput2(AdjointyEnum);             _assert_(adjointy_input);
+	Input2* vz_input        = NULL;
+	Input2* adjointz_input  = NULL;
 	if(domaintype!=Domain2DverticalEnum){
-		vz_input        = element->GetInput(VzEnum);                   _assert_(vy_input);
-		adjointz_input  = element->GetInput(AdjointzEnum);             _assert_(adjointz_input);
-	}
-	Input* dragcoeff_input = element->GetInput(FrictionCoefficientEnum);  _assert_(dragcoeff_input);
+		vz_input        = element->GetInput2(VzEnum);                   _assert_(vy_input);
+		adjointz_input  = element->GetInput2(AdjointzEnum);             _assert_(adjointz_input);
+	}
+	Input2* dragcoeff_input = element->GetInput2(FrictionCoefficientEnum);  _assert_(dragcoeff_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1851,12 +1852,12 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* vx_input        = element->GetInput(VxEnum);                   _assert_(vx_input);
-	Input* vy_input        = NULL;
-	Input* adjointx_input  = element->GetInput(AdjointxEnum);             _assert_(adjointx_input);
-	Input* adjointy_input  = NULL;
-	Input* dragcoeff_input = element->GetInput(FrictionCoefficientEnum);  _assert_(dragcoeff_input);
+	Input2* vx_input        = element->GetInput2(VxEnum);                   _assert_(vx_input);
+	Input2* vy_input        = NULL;
+	Input2* adjointx_input  = element->GetInput2(AdjointxEnum);             _assert_(adjointx_input);
+	Input2* adjointy_input  = NULL;
+	Input2* dragcoeff_input = element->GetInput2(FrictionCoefficientEnum);  _assert_(dragcoeff_input);
 	if(domaintype!=Domain2DverticalEnum){
-		vy_input        = element->GetInput(VyEnum);                   _assert_(vy_input);
-		adjointy_input  = element->GetInput(AdjointyEnum);             _assert_(adjointy_input);
+		vy_input        = element->GetInput2(VyEnum);                   _assert_(vy_input);
+		adjointy_input  = element->GetInput2(AdjointyEnum);             _assert_(adjointy_input);
 	}
 	/* Start  looping on the number of gaussian points: */
@@ -1944,9 +1945,9 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* vx_input        = basalelement->GetInput(VxEnum);                   _assert_(vx_input);
-	Input* vy_input        = basalelement->GetInput(VyEnum);                   _assert_(vy_input);
-	Input* adjointx_input  = basalelement->GetInput(AdjointxEnum);             _assert_(adjointx_input);
-	Input* adjointy_input  = basalelement->GetInput(AdjointyEnum);             _assert_(adjointy_input);
-	Input* dragcoeff_input = basalelement->GetInput(FrictionCoefficientEnum);  _assert_(dragcoeff_input);
+	Input2* vx_input        = basalelement->GetInput2(VxEnum);                   _assert_(vx_input);
+	Input2* vy_input        = basalelement->GetInput2(VyEnum);                   _assert_(vy_input);
+	Input2* adjointx_input  = basalelement->GetInput2(AdjointxEnum);             _assert_(adjointx_input);
+	Input2* adjointy_input  = basalelement->GetInput2(AdjointyEnum);             _assert_(adjointy_input);
+	Input2* dragcoeff_input = basalelement->GetInput2(FrictionCoefficientEnum);  _assert_(dragcoeff_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -2016,13 +2017,13 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* vx_input        = element->GetInput(VxEnum);                   _assert_(vx_input);
-	Input* vy_input        = element->GetInput(VyEnum);                   _assert_(vy_input);
-	Input* adjointx_input  = element->GetInput(AdjointxEnum);             _assert_(adjointx_input);
-	Input* adjointy_input  = element->GetInput(AdjointyEnum);             _assert_(adjointy_input);
-	Input* vz_input        = NULL;
-	Input* adjointz_input  = NULL;
+	Input2* vx_input        = element->GetInput2(VxEnum);                   _assert_(vx_input);
+	Input2* vy_input        = element->GetInput2(VyEnum);                   _assert_(vy_input);
+	Input2* adjointx_input  = element->GetInput2(AdjointxEnum);             _assert_(adjointx_input);
+	Input2* adjointy_input  = element->GetInput2(AdjointyEnum);             _assert_(adjointy_input);
+	Input2* vz_input        = NULL;
+	Input2* adjointz_input  = NULL;
 	if(domaintype!=Domain2DverticalEnum){
-		vz_input        = element->GetInput(VzEnum);                   _assert_(vy_input);
-		adjointz_input  = element->GetInput(AdjointzEnum);             _assert_(adjointz_input);
+		vz_input        = element->GetInput2(VzEnum);                   _assert_(vy_input);
+		adjointz_input  = element->GetInput2(AdjointzEnum);             _assert_(adjointz_input);
 	}
 
@@ -2113,11 +2114,11 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* vx_input        = element->GetInput(VxEnum);                   _assert_(vx_input);
-	Input* vy_input        = NULL;
-	Input* adjointx_input  = element->GetInput(AdjointxEnum);             _assert_(adjointx_input);
-	Input* adjointy_input  = NULL;
+	Input2* vx_input        = element->GetInput2(VxEnum);                   _assert_(vx_input);
+	Input2* vy_input        = NULL;
+	Input2* adjointx_input  = element->GetInput2(AdjointxEnum);             _assert_(adjointx_input);
+	Input2* adjointy_input  = NULL;
 	if(domaintype!=Domain2DverticalEnum){
-		vy_input        = element->GetInput(VyEnum);                   _assert_(vy_input);
-		adjointy_input  = element->GetInput(AdjointyEnum);             _assert_(adjointy_input);
+		vy_input        = element->GetInput2(VyEnum);                   _assert_(vy_input);
+		adjointy_input  = element->GetInput2(AdjointyEnum);             _assert_(adjointy_input);
 	}
 	/* Start  looping on the number of gaussian points: */
@@ -2206,8 +2207,8 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* vx_input        = basalelement->GetInput(VxEnum);          _assert_(vx_input);
-	Input* vy_input        = basalelement->GetInput(VyEnum);          _assert_(vy_input);
-	Input* adjointx_input  = basalelement->GetInput(AdjointxEnum);    _assert_(adjointx_input);
-	Input* adjointy_input  = basalelement->GetInput(AdjointyEnum);    _assert_(adjointy_input);
+	Input2* vx_input        = basalelement->GetInput2(VxEnum);          _assert_(vx_input);
+	Input2* vy_input        = basalelement->GetInput2(VyEnum);          _assert_(vy_input);
+	Input2* adjointx_input  = basalelement->GetInput2(AdjointxEnum);    _assert_(adjointx_input);
+	Input2* adjointy_input  = basalelement->GetInput2(AdjointyEnum);    _assert_(adjointy_input);
 
 	IssmDouble  q_exp;
@@ -2222,9 +2223,9 @@
 
 	/*Recover parameters: */
-	Input* qinput = basalelement->GetInput(FrictionQEnum);
-	Input* cinput = basalelement->GetInput(FrictionCEnum);
-	Input* Asinput = basalelement->GetInput(FrictionAsEnum);
-	Input* nInput =basalelement->GetInput(MaterialsRheologyNEnum);
-	Input* Ninput = basalelement->GetInput(FrictionEffectivePressureEnum);	
+	Input2* qinput = basalelement->GetInput2(FrictionQEnum);
+	Input2* cinput = basalelement->GetInput2(FrictionCEnum);
+	Input2* Asinput = basalelement->GetInput2(FrictionAsEnum);
+	Input2* nInput =basalelement->GetInput2(MaterialsRheologyNEnum);
+	Input2* Ninput = basalelement->GetInput2(FrictionEffectivePressureEnum);	
 	/* Start  looping on the number of gaussian points: */
 	Gauss* gauss=basalelement->NewGauss(4);
@@ -2325,10 +2326,10 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* thickness_input = basalelement->GetInput(ThicknessEnum);             _assert_(thickness_input);
-	Input* vx_input        = basalelement->GetInput(VxEnum);                    _assert_(vx_input);
-	Input* vy_input        = basalelement->GetInput(VyEnum);                    _assert_(vy_input);
-	Input* adjointx_input  = basalelement->GetInput(AdjointxEnum);              _assert_(adjointx_input);
-	Input* adjointy_input  = basalelement->GetInput(AdjointyEnum);              _assert_(adjointy_input);
-	Input* rheologyb_input = basalelement->GetInput(MaterialsRheologyBbarEnum); _assert_(rheologyb_input);
+	Input2* thickness_input = basalelement->GetInput2(ThicknessEnum);             _assert_(thickness_input);
+	Input2* vx_input        = basalelement->GetInput2(VxEnum);                    _assert_(vx_input);
+	Input2* vy_input        = basalelement->GetInput2(VyEnum);                    _assert_(vy_input);
+	Input2* adjointx_input  = basalelement->GetInput2(AdjointxEnum);              _assert_(adjointx_input);
+	Input2* adjointy_input  = basalelement->GetInput2(AdjointyEnum);              _assert_(adjointy_input);
+	Input2* rheologyb_input = basalelement->GetInput2(MaterialsRheologyBbarEnum); _assert_(rheologyb_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -2450,11 +2451,11 @@
 
 	/*Add vx and vy as inputs to the tria element: */
-	element->AddInput(AdjointxEnum,lambdax,element->VelocityInterpolation());
-	element->AddInput(AdjointyEnum,lambday,element->VelocityInterpolation());
-	if(domaintype!=Domain2DverticalEnum) element->AddInput(AdjointzEnum,lambdaz,element->VelocityInterpolation());
+	element->AddInput2(AdjointxEnum,lambdax,element->VelocityInterpolation());
+	element->AddInput2(AdjointyEnum,lambday,element->VelocityInterpolation());
+	if(domaintype!=Domain2DverticalEnum) element->AddInput2(AdjointzEnum,lambdaz,element->VelocityInterpolation());
 
 	element->FindParam(&fe_FS,FlowequationFeFSEnum);
 	if(fe_FS!=LATaylorHoodEnum && fe_FS!=LACrouzeixRaviartEnum)	
-	 element->AddInput(AdjointpEnum,lambdap,element->PressureInterpolation());	
+	 element->AddInput2(AdjointpEnum,lambdap,element->PressureInterpolation());	
 
 	/*Free ressources:*/
@@ -2507,6 +2508,6 @@
 
 	/*Add vx and vy as inputs to the tria element: */
-	element->AddInput(AdjointxEnum,lambdax,element->GetElementType());
-	element->AddInput(AdjointyEnum,lambday,element->GetElementType());
+	element->AddInput2(AdjointxEnum,lambdax,element->GetElementType());
+	element->AddInput2(AdjointyEnum,lambday,element->GetElementType());
 
 	/*Free ressources:*/
Index: /issm/trunk/src/c/analyses/AdjointHorizAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/AdjointHorizAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/AdjointHorizAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/Analysis.h
===================================================================
--- /issm/trunk/src/c/analyses/Analysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/Analysis.h	(revision 24686)
@@ -16,4 +16,5 @@
 
 class Parameters;
+class Inputs2;
 class IoModel;
 class Elements;
@@ -38,5 +39,5 @@
 		virtual void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false)=0;
 		virtual int  DofsPerNode(int** doflist,int domaintype,int approximation)=0;
-		virtual void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type)=0;
+		virtual void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type)=0;
 		virtual void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum)=0;
 
Index: /issm/trunk/src/c/analyses/Balancethickness2Analysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/Balancethickness2Analysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/Balancethickness2Analysis.cpp	(revision 24686)
@@ -23,5 +23,5 @@
 	return 1;
 }/*}}}*/
-void Balancethickness2Analysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void Balancethickness2Analysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Finite element type*/
@@ -29,14 +29,14 @@
 
 	/*Load variables in element*/
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.smb.mass_balance",SmbMassBalanceEnum);
-	iomodel->FetchDataToInput(elements,"md.balancethickness.thickening_rate",BalancethicknessThickeningRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.smb.mass_balance",SmbMassBalanceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.balancethickness.thickening_rate",BalancethicknessThickeningRateEnum);
 
 	/*Update elements: */
@@ -45,5 +45,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 
 			counter++;
@@ -82,6 +82,6 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input = element->GetInput(VxEnum); _assert_(vx_input); 
-	Input* vy_input = element->GetInput(VyEnum); _assert_(vy_input);
+	Input2* vx_input = element->GetInput2(VxEnum); _assert_(vx_input); 
+	Input2* vy_input = element->GetInput2(VyEnum); _assert_(vy_input);
 
 	/*Get element characteristic length*/
@@ -142,7 +142,7 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* ms_input   = element->GetInput(SmbMassBalanceEnum);                _assert_(ms_input);
-	Input* mb_input   = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);       _assert_(mb_input);
-	Input* dhdt_input = element->GetInput(BalancethicknessThickeningRateEnum);            _assert_(dhdt_input);
+	Input2* ms_input   = element->GetInput2(SmbMassBalanceEnum);                _assert_(ms_input);
+	Input2* mb_input   = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum);       _assert_(mb_input);
+	Input2* dhdt_input = element->GetInput2(BalancethicknessThickeningRateEnum);            _assert_(dhdt_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/analyses/Balancethickness2Analysis.h
===================================================================
--- /issm/trunk/src/c/analyses/Balancethickness2Analysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/Balancethickness2Analysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/BalancethicknessAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/BalancethicknessAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/BalancethicknessAnalysis.cpp	(revision 24686)
@@ -4,4 +4,5 @@
 #include "../shared/shared.h"
 #include "../modules/modules.h"
+#include "../classes/Inputs2/DatasetInput2.h"
 
 /*Model processing*/
@@ -72,5 +73,5 @@
 	return 1;
 }/*}}}*/
-void BalancethicknessAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void BalancethicknessAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int    stabilization,finiteelement;
@@ -90,23 +91,23 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.smb.mass_balance",SmbMassBalanceEnum);
-	iomodel->FetchDataToInput(elements,"md.balancethickness.thickening_rate",BalancethicknessThickeningRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.smb.mass_balance",SmbMassBalanceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.balancethickness.thickening_rate",BalancethicknessThickeningRateEnum);
 
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 }/*}}}*/
@@ -171,13 +172,13 @@
 	element->FindParam(&domaintype,DomainTypeEnum);
 	element->FindParam(&stabilization,BalancethicknessStabilizationEnum);
-	Input* vxaverage_input=NULL;
-	Input* vyaverage_input=NULL;
+	Input2* vxaverage_input=NULL;
+	Input2* vyaverage_input=NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		vxaverage_input=element->GetInput(VxEnum); _assert_(vxaverage_input);
-		vyaverage_input=element->GetInput(VyEnum); _assert_(vyaverage_input);
+		vxaverage_input=element->GetInput2(VxEnum); _assert_(vxaverage_input);
+		vyaverage_input=element->GetInput2(VyEnum); _assert_(vyaverage_input);
 	}
 	else{
-		vxaverage_input=element->GetInput(VxAverageEnum); _assert_(vxaverage_input);
-		vyaverage_input=element->GetInput(VyAverageEnum); _assert_(vyaverage_input);
+		vxaverage_input=element->GetInput2(VxAverageEnum); _assert_(vxaverage_input);
+		vyaverage_input=element->GetInput2(VyAverageEnum); _assert_(vyaverage_input);
 	}
 	h = element->CharacteristicLength();
@@ -272,13 +273,13 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&domaintype,DomainTypeEnum);
-	Input* vxaverage_input=NULL;
-	Input* vyaverage_input=NULL;
+	Input2* vxaverage_input=NULL;
+	Input2* vyaverage_input=NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		vxaverage_input=element->GetInput(VxEnum); _assert_(vxaverage_input);
-		vyaverage_input=element->GetInput(VyEnum); _assert_(vyaverage_input);
+		vxaverage_input=element->GetInput2(VxEnum); _assert_(vxaverage_input);
+		vyaverage_input=element->GetInput2(VyEnum); _assert_(vyaverage_input);
 	}
 	else{
-		vxaverage_input=element->GetInput(VxAverageEnum); _assert_(vxaverage_input);
-		vyaverage_input=element->GetInput(VyAverageEnum); _assert_(vyaverage_input);
+		vxaverage_input=element->GetInput2(VxAverageEnum); _assert_(vxaverage_input);
+		vyaverage_input=element->GetInput2(VyAverageEnum); _assert_(vyaverage_input);
 	}
 
@@ -355,7 +356,7 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* mb_input   = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);       _assert_(mb_input);
-	Input* ms_input   = element->GetInput(SmbMassBalanceEnum);     _assert_(ms_input);
-	Input* dhdt_input = element->GetInput(BalancethicknessThickeningRateEnum); _assert_(dhdt_input);
+	Input2* mb_input   = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum);       _assert_(mb_input);
+	Input2* ms_input   = element->GetInput2(SmbMassBalanceEnum);     _assert_(ms_input);
+	Input2* dhdt_input = element->GetInput2(BalancethicknessThickeningRateEnum); _assert_(dhdt_input);
 
 	/*Initialize mb_correction to 0, do not forget!:*/
@@ -396,7 +397,7 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* mb_input   = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);       _assert_(mb_input);
-	Input* ms_input   = element->GetInput(SmbMassBalanceEnum);     _assert_(ms_input);
-	Input* dhdt_input = element->GetInput(BalancethicknessThickeningRateEnum); _assert_(dhdt_input);
+	Input2* mb_input   = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum);       _assert_(mb_input);
+	Input2* ms_input   = element->GetInput2(SmbMassBalanceEnum);     _assert_(ms_input);
+	Input2* dhdt_input = element->GetInput2(BalancethicknessThickeningRateEnum); _assert_(dhdt_input);
 
 	/*Initialize mb_correction to 0, do not forget!:*/
@@ -513,12 +514,12 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GradientIndexing(&vertexpidlist[0],control_index);
-	Input* thickness_input            = element->GetInput(ThicknessEnum);                           _assert_(thickness_input);
-	Input* thicknessobs_input         = element->GetInput(InversionThicknessObsEnum);               _assert_(thicknessobs_input);
-	Input* weights_input              = element->GetInput(InversionCostFunctionsCoefficientsEnum);  _assert_(weights_input);
-	Input* vx_input                   = element->GetInput(VxEnum);                                  _assert_(vx_input);
-	Input* vy_input                   = element->GetInput(VyEnum);                                  _assert_(vy_input);
-	Input* surface_mass_balance_input = element->GetInput(SmbMassBalanceEnum);          _assert_(surface_mass_balance_input);
-	Input* basal_melting_input        = element->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(basal_melting_input);
-	Input* dhdt_input                 = element->GetInput(BalancethicknessThickeningRateEnum);      _assert_(dhdt_input);
+	DatasetInput2* weights_input       = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum);  _assert_(weights_input);
+	Input2* thickness_input            = element->GetInput2(ThicknessEnum);                           _assert_(thickness_input);
+	Input2* thicknessobs_input         = element->GetInput2(InversionThicknessObsEnum);               _assert_(thicknessobs_input);
+	Input2* vx_input                   = element->GetInput2(VxEnum);                                  _assert_(vx_input);
+	Input2* vy_input                   = element->GetInput2(VyEnum);                                  _assert_(vy_input);
+	Input2* surface_mass_balance_input = element->GetInput2(SmbMassBalanceEnum);          _assert_(surface_mass_balance_input);
+	Input2* basal_melting_input        = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(basal_melting_input);
+	Input2* dhdt_input                 = element->GetInput2(BalancethicknessThickeningRateEnum);      _assert_(dhdt_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/analyses/BalancethicknessAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/BalancethicknessAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/BalancethicknessAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/BalancethicknessSoftAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/BalancethicknessSoftAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/BalancethicknessSoftAnalysis.cpp	(revision 24686)
@@ -12,5 +12,5 @@
 	   _error_("not implemented yet");
 }/*}}}*/
-void BalancethicknessSoftAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void BalancethicknessSoftAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 	   _error_("not implemented yet");
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/BalancethicknessSoftAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/BalancethicknessSoftAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/BalancethicknessSoftAnalysis.h	(revision 24686)
@@ -15,5 +15,5 @@
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		void CreateConstraints(Constraints* constraints,IoModel* iomodel);
Index: /issm/trunk/src/c/analyses/BalancevelocityAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/BalancevelocityAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/BalancevelocityAnalysis.cpp	(revision 24686)
@@ -26,5 +26,5 @@
 	return 1;
 }/*}}}*/
-void BalancevelocityAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void BalancevelocityAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Update elements: */
@@ -33,23 +33,23 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.smb.mass_balance",SmbMassBalanceEnum);
-	iomodel->FetchDataToInput(elements,"md.balancethickness.thickening_rate",BalancethicknessThickeningRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.smb.mass_balance",SmbMassBalanceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.balancethickness.thickening_rate",BalancethicknessThickeningRateEnum);
 
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 }/*}}}*/
@@ -81,5 +81,4 @@
 	/*Initialize Element matrix and vectors*/
 	ElementMatrix* Ke     = element->NewElementMatrix();
-	IssmDouble*    B      = xNew<IssmDouble>(2*numnodes);
 	IssmDouble*    basis  = xNew<IssmDouble>(numnodes);
 	IssmDouble*    dbasis = xNew<IssmDouble>(2*numnodes);
@@ -92,5 +91,5 @@
 	/*Retrieve all Inputs and parameters: */
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* H_input = element->GetInput(ThicknessEnum); _assert_(H_input);
+	Input2* H_input = element->GetInput2(ThicknessEnum); _assert_(H_input);
 	h = element->CharacteristicLength();
 
@@ -142,5 +141,4 @@
 	xDelete<IssmDouble>(HNx);
 	xDelete<IssmDouble>(HNy);
-	xDelete<IssmDouble>(B);
 	delete gauss;
 	return Ke;
@@ -184,8 +182,8 @@
 	/*Retrieve all inputs and parameters*/
 	basalelement->GetVerticesCoordinates(&xyz_list);
-	Input* ms_input   = basalelement->GetInput(SmbMassBalanceEnum);          _assert_(ms_input);
-	Input* mb_input   = basalelement->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(mb_input);
-	Input* dhdt_input = basalelement->GetInput(BalancethicknessThickeningRateEnum);      _assert_(dhdt_input);
-	Input* H_input    = basalelement->GetInput(ThicknessEnum);                           _assert_(H_input);
+	Input2* ms_input   = basalelement->GetInput2(SmbMassBalanceEnum);          _assert_(ms_input);
+	Input2* mb_input   = basalelement->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(mb_input);
+	Input2* dhdt_input = basalelement->GetInput2(BalancethicknessThickeningRateEnum);      _assert_(dhdt_input);
+	Input2* H_input    = basalelement->GetInput2(ThicknessEnum);                           _assert_(H_input);
 	IssmDouble h = basalelement->CharacteristicLength();
 
Index: /issm/trunk/src/c/analyses/BalancevelocityAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/BalancevelocityAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/BalancevelocityAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/DamageEvolutionAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/DamageEvolutionAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/DamageEvolutionAnalysis.cpp	(revision 24686)
@@ -40,5 +40,5 @@
 	return 1;
 }/*}}}*/
-void DamageEvolutionAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void DamageEvolutionAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int finiteelement;
@@ -54,5 +54,10 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
+
+			/*Need to know the type of approximation for this element*/
+			if(iomodel->Data("md.flowequation.element_equation")){
+				inputs2->SetInput(ApproximationEnum,counter,IoCodeToEnumElementEquation(reCast<int>(iomodel->Data("md.flowequation.element_equation")[i])));
+			}
 			counter++;
 		}
@@ -63,18 +68,15 @@
 	for(int i=0;i<elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-		int numvertices = element->GetNumberOfVertices();
-		IssmDouble* values = xNewZeroInit<IssmDouble>(numvertices);
-		element->AddInput(DamageFEnum,values,P1Enum);
-		xDelete<IssmDouble>(values);
+		element->SetElementInput(inputs2,DamageFEnum,0.);
 	}
 
 
 	/*What input do I need to run my damage evolution model?*/
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
-	if(iomodel->domaintype==Domain3DEnum) iomodel->FetchDataToInput(elements,"md.initialization.vz",VzEnum);
-	iomodel->FetchDataToInput(elements,"md.damage.D",DamageDEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
+	if(iomodel->domaintype==Domain3DEnum) iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vz",VzEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.damage.D",DamageDEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
 
 }/*}}}*/
@@ -137,5 +139,5 @@
 
 	/*Add input*/
-	element->AddInput(DamageFEnum,f,element->GetElementType());
+	element->AddInput2(DamageFEnum,f,element->GetElementType());
 
 	/*Clean up and return*/
@@ -172,13 +174,13 @@
 	element->ComputeDeviatoricStressTensor();
 
-	Input* principalDevStress1_input = element->GetInput(DeviatoricStress1Enum);     _assert_(principalDevStress1_input);
-	Input* principalDevStress2_input = element->GetInput(DeviatoricStress2Enum);     _assert_(principalDevStress2_input);
-
-	Input* damage_input = NULL;
+	Input2* principalDevStress1_input = element->GetInput2(DeviatoricStress1Enum);     _assert_(principalDevStress1_input);
+	Input2* principalDevStress2_input = element->GetInput2(DeviatoricStress2Enum);     _assert_(principalDevStress2_input);
+
+	Input2* damage_input = NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		damage_input = element->GetInput(DamageDbarEnum); 	_assert_(damage_input);
+		damage_input = element->GetInput2(DamageDbarEnum); 	_assert_(damage_input);
 	}
 	else{
-		damage_input = element->GetInput(DamageDEnum);   _assert_(damage_input);
+		damage_input = element->GetInput2(DamageDEnum);   _assert_(damage_input);
 	}
 
@@ -220,5 +222,5 @@
 
 	/*Add input*/
-	element->AddInput(DamageFEnum,f,element->GetElementType());
+	element->AddInput2(DamageFEnum,f,element->GetElementType());
 
 	/*Clean up and return*/
@@ -248,17 +250,17 @@
 
 	/*retrieve what we need: */
-	Input* eps_xx_input  = element->GetInput(StrainRatexxEnum);     _assert_(eps_xx_input);
-	Input* eps_xy_input  = element->GetInput(StrainRatexyEnum);     _assert_(eps_xy_input);
-	Input* eps_yy_input  = element->GetInput(StrainRateyyEnum);     _assert_(eps_yy_input);
-	Input*  n_input=element->GetInput(MaterialsRheologyNEnum); _assert_(n_input);
-	Input* damage_input = NULL;
-	Input* B_input = NULL;
+	Input2* eps_xx_input  = element->GetInput2(StrainRatexxEnum);     _assert_(eps_xx_input);
+	Input2* eps_xy_input  = element->GetInput2(StrainRatexyEnum);     _assert_(eps_xy_input);
+	Input2* eps_yy_input  = element->GetInput2(StrainRateyyEnum);     _assert_(eps_yy_input);
+	Input2*  n_input=element->GetInput2(MaterialsRheologyNEnum); _assert_(n_input);
+	Input2* damage_input = NULL;
+	Input2* B_input = NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		damage_input = element->GetInput(DamageDbarEnum); 	_assert_(damage_input);
-		B_input=element->GetInput(MaterialsRheologyBbarEnum); _assert_(B_input);
+		damage_input = element->GetInput2(DamageDbarEnum); 	_assert_(damage_input);
+		B_input=element->GetInput2(MaterialsRheologyBbarEnum); _assert_(B_input);
 	}
 	else{
-		damage_input = element->GetInput(DamageDEnum);   _assert_(damage_input);
-		B_input=element->GetInput(MaterialsRheologyBEnum); _assert_(B_input);
+		damage_input = element->GetInput2(DamageDEnum);   _assert_(damage_input);
+		B_input=element->GetInput2(MaterialsRheologyBEnum); _assert_(B_input);
 	}
 
@@ -295,5 +297,5 @@
 
 	/*Add input*/
-	element->AddInput(DamageFEnum,f,element->GetElementType());
+	element->AddInput2(DamageFEnum,f,P1DGEnum);
 
 	/*Clean up and return*/
@@ -335,23 +337,23 @@
 	}
 	/*retrieve what we need: */
-	Input* tau_xx_input  = element->GetInput(DeviatoricStressxxEnum);     _assert_(tau_xx_input);
-	Input* tau_xy_input  = element->GetInput(DeviatoricStressxyEnum);     _assert_(tau_xy_input);
-	Input* tau_yy_input  = element->GetInput(DeviatoricStressyyEnum);     _assert_(tau_yy_input);
-	Input* tau_xz_input  = NULL;
-	Input* tau_yz_input  = NULL;
-	Input* tau_zz_input  = NULL;
-	Input* stressMaxPrincipal_input = NULL;
+	Input2* tau_xx_input  = element->GetInput2(DeviatoricStressxxEnum);     _assert_(tau_xx_input);
+	Input2* tau_xy_input  = element->GetInput2(DeviatoricStressxyEnum);     _assert_(tau_xy_input);
+	Input2* tau_yy_input  = element->GetInput2(DeviatoricStressyyEnum);     _assert_(tau_yy_input);
+	Input2* tau_xz_input  = NULL;
+	Input2* tau_yz_input  = NULL;
+	Input2* tau_zz_input  = NULL;
+	Input2* stressMaxPrincipal_input = NULL;
 	if(dim==3){
-		tau_xz_input  = element->GetInput(DeviatoricStressxzEnum);     _assert_(tau_xz_input);
-		tau_yz_input  = element->GetInput(DeviatoricStressyzEnum);     _assert_(tau_yz_input);
-		tau_zz_input  = element->GetInput(DeviatoricStresszzEnum);     _assert_(tau_zz_input);
-		stressMaxPrincipal_input = element->GetInput(StressMaxPrincipalEnum); _assert_(stressMaxPrincipal_input);
-	}
-	Input* damage_input = NULL;
+		tau_xz_input  = element->GetInput2(DeviatoricStressxzEnum);     _assert_(tau_xz_input);
+		tau_yz_input  = element->GetInput2(DeviatoricStressyzEnum);     _assert_(tau_yz_input);
+		tau_zz_input  = element->GetInput2(DeviatoricStresszzEnum);     _assert_(tau_zz_input);
+		stressMaxPrincipal_input = element->GetInput2(StressMaxPrincipalEnum); _assert_(stressMaxPrincipal_input);
+	}
+	Input2* damage_input = NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		damage_input = element->GetInput(DamageDbarEnum); 	_assert_(damage_input);
+		damage_input = element->GetInput2(DamageDbarEnum); 	_assert_(damage_input);
 	}
 	else{
-		damage_input = element->GetInput(DamageDEnum);   _assert_(damage_input);
+		damage_input = element->GetInput2(DamageDEnum);   _assert_(damage_input);
 	}
 
@@ -414,5 +416,5 @@
 	}
 	/*Add input*/
-	element->AddInput(DamageFEnum,f,element->GetElementType());
+	element->AddInput2(DamageFEnum,f,P1DGEnum);
 
 	/*Clean up and return*/
@@ -461,9 +463,9 @@
 	//printf("dt %f\n", dt);
 	element->FindParam(&stabilization,DamageStabilizationEnum);
-	Input* vx_input = element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input = element->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input = NULL;
+	Input2* vx_input = element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = element->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input = NULL;
 	if(dim==3){
-		vz_input=element->GetInput(VzEnum); _assert_(vz_input);
+		vz_input=element->GetInput2(VzEnum); _assert_(vz_input);
 	}
 
@@ -634,11 +636,11 @@
 	}
 
-	Input* damaged_input = NULL;
-	Input* damagef_input = element->GetInput(DamageFEnum); _assert_(damagef_input);
+	Input2* damaged_input = NULL;
+	Input2* damagef_input = element->GetInput2(DamageFEnum); _assert_(damagef_input);
 	if(domaintype==Domain2DhorizontalEnum){
-		damaged_input = element->GetInput(DamageDbarEnum); _assert_(damaged_input);
+		damaged_input = element->GetInput2(DamageDbarEnum); _assert_(damaged_input);
 	}
 	else{
-		damaged_input = element->GetInput(DamageDEnum); _assert_(damaged_input);
+		damaged_input = element->GetInput2(DamageDEnum); _assert_(damaged_input);
 	}
 
@@ -755,8 +757,8 @@
 	element->FindParam(&domaintype,DomainTypeEnum);
 	if(domaintype==Domain2DhorizontalEnum){
-		element->AddInput(DamageDbarEnum,newdamage,element->GetElementType());
+		element->AddInput2(DamageDbarEnum,newdamage,element->GetElementType());
 	}
 	else{
-		element->AddInput(DamageDEnum,newdamage,element->GetElementType());
+		element->AddInput2(DamageDEnum,newdamage,element->GetElementType());
 	}
 
@@ -792,6 +794,6 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vxaverage_input=element->GetInput(VxEnum); _assert_(vxaverage_input);
-	Input* vyaverage_input=element->GetInput(VyEnum); _assert_(vyaverage_input);
+	Input2* vxaverage_input=element->GetInput2(VxEnum); _assert_(vxaverage_input);
+	Input2* vyaverage_input=element->GetInput2(VyEnum); _assert_(vyaverage_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/analyses/DamageEvolutionAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/DamageEvolutionAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/DamageEvolutionAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/DepthAverageAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/DepthAverageAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/DepthAverageAnalysis.cpp	(revision 24686)
@@ -18,5 +18,5 @@
 	return 1;
 }/*}}}*/
-void DepthAverageAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void DepthAverageAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int counter=0;
@@ -24,5 +24,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -30,5 +30,5 @@
 
 	if(iomodel->domaintype==Domain2DverticalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
 	}
 }/*}}}*/
@@ -114,5 +114,5 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&input_enum,InputToDepthaverageInEnum);
-	Input* input = element->GetInput(input_enum); _assert_(input);
+	Input2* input = element->GetInput2(input_enum); _assert_(input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/analyses/DepthAverageAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/DepthAverageAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/DepthAverageAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/EnthalpyAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/EnthalpyAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/EnthalpyAnalysis.cpp	(revision 24686)
@@ -99,5 +99,5 @@
 	return 1;
 }/*}}}*/
-void EnthalpyAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void EnthalpyAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	bool dakota_analysis,ismovingfront,isenthalpy;
@@ -124,5 +124,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
@@ -134,28 +134,28 @@
 	iomodel->FindConstant(&materialstype,"md.materials.type");
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	}
-	iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.waterfraction",WaterfractionEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.enthalpy",EnthalpyEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.watercolumn",WatercolumnEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vz",VzEnum);
-	InputUpdateFromConstantx(elements,0.,VxMeshEnum);
-	InputUpdateFromConstantx(elements,0.,VyMeshEnum);
-	InputUpdateFromConstantx(elements,0.,VzMeshEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	}
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.waterfraction",WaterfractionEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.enthalpy",EnthalpyEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.watercolumn",WatercolumnEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vz",VzEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,VxMeshEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,VyMeshEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,VzMeshEnum);
 	if(ismovingfront){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum); // required for updating active nodes
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum); // required for updating active nodes
 	}
 
@@ -166,24 +166,24 @@
 			break;
 		default:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
 			break;
 	}
 
 	/*Rheology type*/
-	iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
 	switch(materialstype){
 		case MatenhancediceEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_E",MaterialsRheologyEEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_E",MaterialsRheologyEEnum);
 			break;
 		case MatdamageiceEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
 			break;
 		case MatestarEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_Ec",MaterialsRheologyEcEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_Es",MaterialsRheologyEsEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_Ec",MaterialsRheologyEcEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_Es",MaterialsRheologyEsEnum);
 			break;
 		case MaticeEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
 			break;
 		default:
@@ -195,65 +195,65 @@
 		case 1:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 			}
 			break;
 		case 2:
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
 			break;
 		case 3:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.As",FrictionAsEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.As",FrictionAsEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 			}
 			break;
 		case 4:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
 			break;
 		case 5:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.water_layer",FrictionWaterLayerEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.water_layer",FrictionWaterLayerEnum);
 			break;
 		case 6:
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
 			break;
 		case 7:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 			}
 			break;
 		case 9:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
-			InputUpdateFromConstantx(elements,1.,FrictionPEnum);
-			InputUpdateFromConstantx(elements,1.,FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
+			InputUpdateFromConstantx(inputs2,elements,1.,FrictionPEnum);
+			InputUpdateFromConstantx(inputs2,elements,1.,FrictionQEnum);
 			break;
 		default:
@@ -287,12 +287,19 @@
 	int frictionlaw;
 	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	if(frictionlaw==4 || frictionlaw==6){
+	if(frictionlaw==6){
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
 	}
-	if(frictionlaw==3 || frictionlaw==1 || frictionlaw==7){
+	if(frictionlaw==4){
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+	}
+	if(frictionlaw==1 || frictionlaw==3 || frictionlaw==7){
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
 	}
 	if(frictionlaw==9){
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));		
 		parameters->AddObject(new IntParam(FrictionCouplingEnum,0));
 	}
@@ -318,5 +325,5 @@
 
 	/*Get parameters and inputs: */
-	Input* pressure_input		 = element->GetInput(PressureEnum);							 _assert_(pressure_input);
+	Input2* pressure_input		 = element->GetInput2(PressureEnum);							 _assert_(pressure_input);
 
 	/*Fetch indices of basal & surface nodes for this finite element*/
@@ -400,10 +407,10 @@
 
 	/*retrieve inputs*/
-	Input* enthalpy_input         = element->GetInput(enthalpy_enum);                    _assert_(enthalpy_input);
-	Input* pressure_input			= element->GetInput(PressureEnum);							 _assert_(pressure_input);
-	Input* geothermalflux_input   = element->GetInput(BasalforcingsGeothermalfluxEnum); _assert_(geothermalflux_input);
-	Input* vx_input               = element->GetInput(VxEnum);                          _assert_(vx_input);
-	Input* vy_input               = element->GetInput(VyEnum);                          _assert_(vy_input);
-	Input* vz_input               = element->GetInput(VzEnum);                          _assert_(vz_input);
+	Input2* enthalpy_input       = element->GetInput2(enthalpy_enum);                   _assert_(enthalpy_input);
+	Input2* pressure_input       = element->GetInput2(PressureEnum);                    _assert_(pressure_input);
+	Input2* geothermalflux_input = element->GetInput2(BasalforcingsGeothermalfluxEnum); _assert_(geothermalflux_input);
+	Input2* vx_input             = element->GetInput2(VxEnum);                          _assert_(vx_input);
+	Input2* vy_input             = element->GetInput2(VyEnum);                          _assert_(vy_input);
+	Input2* vz_input             = element->GetInput2(VzEnum);                          _assert_(vz_input);
 
 	/*Build friction element, needed later: */
@@ -502,9 +509,10 @@
 
 	/*feed updated variables back into model*/
+	int finite_element = element->GetElementType(); if(finite_element==P1Enum) finite_element = P1DGEnum;
 	if(dt!=0.){
-		element->AddInput(enthalpy_enum,enthalpies,element->GetElementType());
-		element->AddInput(WatercolumnEnum,watercolumns,element->GetElementType());
-	}
-	element->AddInput(BasalforcingsGroundediceMeltingRateEnum,basalmeltingrates,element->GetElementType());
+		element->AddInput2(enthalpy_enum,enthalpies,finite_element);
+		element->AddInput2(WatercolumnEnum,watercolumns,finite_element);
+	}
+	element->AddInput2(BasalforcingsGroundediceMeltingRateEnum,basalmeltingrates,P1DGEnum);
 
 	/*Clean up and return*/
@@ -595,10 +603,10 @@
 	IssmDouble  heatcapacity        = element->FindParam(MaterialsHeatcapacityEnum);
 	IssmDouble  thermalconductivity = element->FindParam(MaterialsThermalconductivityEnum);
-	Input* vx_input  = element->GetInput(VxEnum);     _assert_(vx_input);
-	Input* vy_input  = element->GetInput(VyEnum);     _assert_(vy_input);
-	Input* vz_input  = element->GetInput(VzEnum);     _assert_(vz_input);
-	Input* vxm_input = element->GetInput(VxMeshEnum); _assert_(vxm_input);
-	Input* vym_input = element->GetInput(VyMeshEnum); _assert_(vym_input);
-	Input* vzm_input = element->GetInput(VzMeshEnum); _assert_(vzm_input);
+	Input2* vx_input  = element->GetInput2(VxEnum);     _assert_(vx_input);
+	Input2* vy_input  = element->GetInput2(VyEnum);     _assert_(vy_input);
+	Input2* vz_input  = element->GetInput2(VzEnum);     _assert_(vz_input);
+	Input2* vxm_input = element->GetInput2(VxMeshEnum); _assert_(vxm_input);
+	Input2* vym_input = element->GetInput2(VyMeshEnum); _assert_(vym_input);
+	Input2* vzm_input = element->GetInput2(VzMeshEnum); _assert_(vzm_input);
 
 	/*Enthalpy diffusion parameter*/
@@ -818,11 +826,13 @@
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
 	element->FindParam(&stabilization,ThermalStabilizationEnum);
-	Input* vx_input=element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input=element->GetInput(VzEnum); _assert_(vz_input);
-	Input* enthalpypicard_input=element->GetInput(EnthalpyPicardEnum); _assert_(enthalpypicard_input);
-	Input* pressure_input=element->GetInput(PressureEnum); _assert_(pressure_input);
-	Input* enthalpy_input=NULL;
-	if(reCast<bool,IssmDouble>(dt)){enthalpy_input = element->GetInput(EnthalpyEnum); _assert_(enthalpy_input);}
+	Input2* vx_input=element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input=element->GetInput2(VzEnum); _assert_(vz_input);
+	Input2* enthalpypicard_input=element->GetInput2(EnthalpyPicardEnum); _assert_(enthalpypicard_input);
+	Input2* pressure_input=element->GetInput2(PressureEnum); _assert_(pressure_input);
+	Input2* enthalpy_input=NULL;
+	if(dt>0.){
+		enthalpy_input = element->GetInput2(EnthalpyEnum); _assert_(enthalpy_input);
+	}
 
 	/* Start  looping on the number of gaussian points: */
@@ -863,5 +873,5 @@
 
 		/* Build transient now */
-		if(reCast<bool,IssmDouble>(dt)){
+		if(dt>0.){
 			enthalpy_input->GetInputValue(&enthalpy, gauss);
 			scalar_transient=enthalpy*Jdet*gauss->weight;
@@ -890,12 +900,12 @@
 			element->ElementSizes(&hx,&hy,&hz);
 			kappa=this->EnthalpyDiffusionParameterVolume(element,EnthalpyPicardEnum); _assert_(kappa>=0.);
-                        vx_input->GetInputValue(&u,gauss);
-                        vy_input->GetInputValue(&v,gauss);
-                        vz_input->GetInputValue(&w,gauss);
-                        element->StabilizationParameterAnisotropic(&tau_parameter_anisotropic[0],u,v,w,hx,hy,hz,kappa/rho_ice);
-                        tau_parameter_hor=tau_parameter_anisotropic[0];
-                        tau_parameter_ver=tau_parameter_anisotropic[1];
-                        
-                        for(i=0;i<numnodes;i++) pe->values[i]+=scalar_def*(tau_parameter_hor*u*dbasis[0*numnodes+i]+tau_parameter_hor*v*dbasis[1*numnodes+i]+tau_parameter_ver*w*dbasis[2*numnodes+i]);
+			vx_input->GetInputValue(&u,gauss);
+			vy_input->GetInputValue(&v,gauss);
+			vz_input->GetInputValue(&w,gauss);
+			element->StabilizationParameterAnisotropic(&tau_parameter_anisotropic[0],u,v,w,hx,hy,hz,kappa/rho_ice);
+			tau_parameter_hor=tau_parameter_anisotropic[0];
+			tau_parameter_ver=tau_parameter_anisotropic[1];
+
+			for(i=0;i<numnodes;i++) pe->values[i]+=scalar_def*(tau_parameter_hor*u*dbasis[0*numnodes+i]+tau_parameter_hor*v*dbasis[1*numnodes+i]+tau_parameter_ver*w*dbasis[2*numnodes+i]);
 		}
 	}
@@ -940,12 +950,12 @@
 	if(dt==0. && !converged) enthalpy_enum=EnthalpyPicardEnum; // use enthalpy from last iteration
 	else enthalpy_enum=EnthalpyEnum; // use enthalpy from last time step
-	Input* vx_input             = element->GetInput(VxEnum);                          _assert_(vx_input);
-	Input* vy_input             = element->GetInput(VyEnum);                          _assert_(vy_input);
-	Input* vz_input             = element->GetInput(VzEnum);                          _assert_(vz_input);
-	Input* enthalpy_input		 = element->GetInput(enthalpy_enum);					 _assert_(enthalpy_input);
-	Input* pressure_input		 = element->GetInput(PressureEnum);							 _assert_(pressure_input);
-	Input* watercolumn_input	 = element->GetInput(WatercolumnEnum);							 _assert_(watercolumn_input);
-	Input* meltingrate_input	 = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);							 _assert_(meltingrate_input);
-	Input* geothermalflux_input = element->GetInput(BasalforcingsGeothermalfluxEnum); _assert_(geothermalflux_input);
+	Input2* vx_input             = element->GetInput2(VxEnum);                          _assert_(vx_input);
+	Input2* vy_input             = element->GetInput2(VyEnum);                          _assert_(vy_input);
+	Input2* vz_input             = element->GetInput2(VzEnum);                          _assert_(vz_input);
+	Input2* enthalpy_input		 = element->GetInput2(enthalpy_enum);					 _assert_(enthalpy_input);
+	Input2* pressure_input		 = element->GetInput2(PressureEnum);							 _assert_(pressure_input);
+	Input2* watercolumn_input	 = element->GetInput2(WatercolumnEnum);							 _assert_(watercolumn_input);
+	Input2* meltingrate_input	 = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum);							 _assert_(meltingrate_input);
+	Input2* geothermalflux_input = element->GetInput2(BasalforcingsGeothermalfluxEnum); _assert_(geothermalflux_input);
 	IssmDouble  rho_ice			 = element->FindParam(MaterialsRhoIceEnum);
 
@@ -1033,5 +1043,5 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	Input*      pressure_input=element->GetInput(PressureEnum); _assert_(pressure_input);
+	Input2*      pressure_input=element->GetInput2(PressureEnum); _assert_(pressure_input);
 	IssmDouble  gravity             = element->FindParam(ConstantsGEnum);
 	IssmDouble  rho_water           = element->FindParam(MaterialsRhoSeawaterEnum);
@@ -1088,5 +1098,6 @@
 			drainage[k]=DrainageFunctionWaterfraction(waterfractions[k], dt);
 		}
-		element->AddInput(WaterfractionDrainageEnum,drainage,element->GetElementType());
+		int finite_element = element->GetElementType(); if(finite_element==P1Enum) finite_element = P1DGEnum;
+		element->AddInput2(WaterfractionDrainageEnum,drainage,finite_element);
 
 		xDelete<IssmDouble>(waterfractions);
@@ -1121,5 +1132,6 @@
 			drainage_int[k]*=thicknesses[k];
 		}
-		element->AddInput(WaterfractionDrainageIntegratedEnum, drainage_int, element->GetElementType());
+		int finite_element = element->GetElementType(); if(finite_element==P1Enum) finite_element = P1DGEnum;
+		element->AddInput2(WaterfractionDrainageIntegratedEnum, drainage_int,finite_element);
 
 		xDelete<IssmDouble>(drainage_int);
@@ -1144,5 +1156,6 @@
 			watercolumn[basalnodeindices[k]]+=dt*drainage_int[basalnodeindices[k]];
 		}
-		element->AddInput(WatercolumnEnum, watercolumn, element->GetElementType());
+		int finite_element = element->GetElementType(); if(finite_element==P1Enum) finite_element = P1DGEnum;
+		element->AddInput2(WatercolumnEnum, watercolumn,finite_element);
 
 		xDelete<IssmDouble>(watercolumn);
@@ -1180,6 +1193,7 @@
 			element->ThermalToEnthalpy(&enthalpies[k], temperatures[k], waterfractions[k], pressures[k]);
 		}
-		element->AddInput(WaterfractionEnum,waterfractions,element->GetElementType());
-		element->AddInput(EnthalpyEnum,enthalpies,element->GetElementType());
+		int finite_element = element->GetElementType(); if(finite_element==P1Enum) finite_element = P1DGEnum;
+		element->AddInput2(WaterfractionEnum,waterfractions,finite_element);
+		element->AddInput2(EnthalpyEnum,enthalpies,finite_element);
 
 		xDelete<IssmDouble>(enthalpies);
@@ -1366,8 +1380,8 @@
 
 	/*Get parameters and inputs: */
-	Input* enthalpy_input		 = element->GetInput(EnthalpyPicardEnum);					 _assert_(enthalpy_input);
-	Input* pressure_input		 = element->GetInput(PressureEnum);							 _assert_(pressure_input);
-	Input* watercolumn_input	 = element->GetInput(WatercolumnEnum);							 _assert_(watercolumn_input);
-	Input* meltingrate_input	 = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);							 _assert_(meltingrate_input);
+	Input2* enthalpy_input		 = element->GetInput2(EnthalpyPicardEnum);					 _assert_(enthalpy_input);
+	Input2* pressure_input		 = element->GetInput2(PressureEnum);							 _assert_(pressure_input);
+	Input2* watercolumn_input	 = element->GetInput2(WatercolumnEnum);							 _assert_(watercolumn_input);
+	Input2* meltingrate_input	 = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum);							 _assert_(meltingrate_input);
 
 	/*Fetch indices of basal & surface nodes for this finite element*/
@@ -1437,8 +1451,8 @@
 
 	/*Get parameters and inputs: */
-	Input* enthalpy_input       = element->GetInput(EnthalpyEnum);                    _assert_(enthalpy_input); //TODO: check EnthalpyPicard?
-	Input* pressure_input		 = element->GetInput(PressureEnum);							 _assert_(pressure_input);
-	Input* watercolumn_input	 = element->GetInput(WatercolumnEnum);							 _assert_(watercolumn_input);
-	Input* meltingrate_input	 = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);							 _assert_(meltingrate_input);
+	Input2* enthalpy_input    = element->GetInput2(EnthalpyEnum);                            _assert_(enthalpy_input); //TODO: check EnthalpyPicard?
+	Input2* pressure_input    = element->GetInput2(PressureEnum);                            _assert_(pressure_input);
+	Input2* watercolumn_input = element->GetInput2(WatercolumnEnum);                         _assert_(watercolumn_input);
+	Input2* meltingrate_input = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(meltingrate_input);
 
 	/*Fetch indices of basal & surface nodes for this finite element*/
@@ -1602,4 +1616,5 @@
 	element->GetInputValue(&converged,ConvergedEnum);
 	element->GetInputListOnNodes(&pressure[0],PressureEnum);
+	int finite_element = element->GetElementType(); if(finite_element==P1Enum) finite_element = P1DGEnum;
 	if(converged){
 		for(i=0;i<numnodes;i++){
@@ -1608,7 +1623,7 @@
 			//if(waterfraction[i]>1.) _error_("Water fraction >1 found in solution vector");
 		}
-		element->AddInput(EnthalpyEnum,values,element->GetElementType());
-		element->AddInput(WaterfractionEnum,waterfraction,element->GetElementType());
-		element->AddInput(TemperatureEnum,temperature,element->GetElementType());
+		element->AddInput2(EnthalpyEnum,values,finite_element);
+		element->AddInput2(WaterfractionEnum,waterfraction,finite_element);
+		element->AddInput2(TemperatureEnum,temperature,finite_element);
 
 		IssmDouble* n = xNew<IssmDouble>(numnodes);
@@ -1630,36 +1645,36 @@
 			case BuddJackaEnum:
 				for(i=0;i<numnodes;i++) B[i]=BuddJacka(temperature[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],finite_element);
 				break;
 			case CuffeyEnum:
 				for(i=0;i<numnodes;i++) B[i]=Cuffey(temperature[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],finite_element);
 				break;
 			case CuffeyTemperateEnum:
 				for(i=0;i<numnodes;i++) B[i]=CuffeyTemperate(temperature[i], waterfraction[i],n[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],finite_element);
 				break;
 			case PatersonEnum:
 				for(i=0;i<numnodes;i++) B[i]=Paterson(temperature[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],finite_element);
 				break;
 			case NyeH2OEnum:
 				for(i=0;i<numnodes;i++) B[i]=NyeH2O(values[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],finite_element);
 				break;
 			case NyeCO2Enum:
 				for(i=0;i<numnodes;i++) B[i]=NyeCO2(values[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],finite_element);
 				break;
 			case ArrheniusEnum:{
 				element->GetVerticesCoordinates(&xyz_list);
 				for(i=0;i<numnodes;i++) B[i]=Arrhenius(temperature[i],surface[i]-xyz_list[i*3+2],n[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],finite_element);
 				break;
 				}
 			case LliboutryDuvalEnum:{
-				for(i=0;i<numnodes;i++) B[i]=LliboutryDuval(values[i],pressure[i],n[i],element->FindParam(MaterialsBetaEnum),element->FindParam(ConstantsReferencetemperatureEnum),element->FindParam(MaterialsHeatcapacityEnum),element->FindParam(MaterialsLatentheatEnum));
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
-				break;
+				for(i=0;i<numnodes;i++) B[i]=LliboutryDuval(values[i],pressure[i],n[i],element->FindParam(MaterialsBetaEnum),element->FindParam(ConstantsReferencetemperatureEnum),element->FindParam(MaterialsHeatcapacityEnum),element->FindParam(MaterialsLatentheatEnum)); 
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],finite_element); 
+				break; 
 				}
 			default: _error_("Rheology law " << EnumToStringx(rheology_law) << " not supported yet");
@@ -1668,5 +1683,5 @@
 	}
 	else{
-		element->AddInput(EnthalpyPicardEnum,values,element->GetElementType());
+		element->AddInput2(EnthalpyPicardEnum,values,finite_element);
 	}
 
@@ -1691,6 +1706,10 @@
 	femmodel->parameters->FindParam(&isdrainicecolumn,ThermalIsdrainicecolumnEnum);
 
-	if(isdrainicecolumn)	DrainWaterfraction(femmodel);
-	if(computebasalmeltingrates)	ComputeBasalMeltingrate(femmodel);
+	if(isdrainicecolumn){
+		DrainWaterfraction(femmodel);
+	}
+	if(computebasalmeltingrates){
+		ComputeBasalMeltingrate(femmodel);
+	}
 
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/EnthalpyAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/EnthalpyAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/EnthalpyAnalysis.h	(revision 24686)
@@ -18,5 +18,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/EnumToAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/EnumToAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/EnumToAnalysis.h	(revision 24686)
@@ -10,5 +10,5 @@
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		void CreateConstraints(Constraints* constraints,IoModel* iomodel);
Index: /issm/trunk/src/c/analyses/EsaAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/EsaAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/EsaAnalysis.cpp	(revision 24686)
@@ -18,5 +18,5 @@
 	return 1;
 }/*}}}*/
-void EsaAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void EsaAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Update elements: */
@@ -25,5 +25,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -31,6 +31,6 @@
 
 	/*Create inputs: */
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.esa.deltathickness",EsaDeltathicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.esa.deltathickness",EsaDeltathicknessEnum);
 
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/EsaAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/EsaAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/EsaAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/ExtrapolationAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/ExtrapolationAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/ExtrapolationAnalysis.cpp	(revision 24686)
@@ -26,5 +26,5 @@
 }
 /*}}}*/
-void ExtrapolationAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void ExtrapolationAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 	int    finiteelement;
 
@@ -37,11 +37,11 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
 	}
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 }
@@ -116,14 +116,13 @@
 
 	/*Initialize Element vector and other vectors*/
-	ElementMatrix* Ke     = workelement->NewElementMatrix();
-	IssmDouble*    B      = xNew<IssmDouble>(dim*numnodes);
-	IssmDouble*    Bprime = xNew<IssmDouble>(dim*numnodes);
-	IssmDouble*		dlsf   = xNew<IssmDouble>(dim);
-	IssmDouble*		normal = xNew<IssmDouble>(dim);
-   IssmDouble*		D	    = xNewZeroInit<IssmDouble>(dim*dim);
+	ElementMatrix *Ke     = workelement->NewElementMatrix();
+	IssmDouble    *basis  = xNew<IssmDouble>(numnodes);
+	IssmDouble    *dbasis = xNew<IssmDouble>(dim*numnodes);
+	IssmDouble     dlsf[3];
+	IssmDouble     normal[3];
 
 	/*Retrieve all inputs and parameters*/
-	Input* lsf_slopex_input=workelement->GetInput(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
-	Input* lsf_slopey_input=workelement->GetInput(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
+	Input2* lsf_slopex_input=workelement->GetInput2(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
+	Input2* lsf_slopey_input=workelement->GetInput2(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
 	workelement->GetVerticesCoordinates(&xyz_list);
 
@@ -143,18 +142,15 @@
 
 		workelement->JacobianDeterminant(&Jdet,xyz_list,gauss);
-		GetB(B,workelement,xyz_list,gauss,dim);
-		GetBprime(Bprime,workelement,xyz_list,gauss,dim);
+		workelement->NodalFunctions(basis,gauss);
+		workelement->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
 
 		D_scalar=gauss->weight*Jdet;
 
 		if(extrapolatebydiffusion){
-
-			/* diffuse values outward only along the xy-plane*/
-         for(int i=0;i<2;i++) D[i*dim+i] = D_scalar;
-
-			TripleMultiply(Bprime,dim,numnodes,1,
-					D,dim,dim,0,
-					Bprime,dim,numnodes,0,
-					&Ke->values[0],1);
+			for(int i=0;i<numnodes;i++){
+				for(int j=0;j<numnodes;j++){
+					Ke->values[i*numnodes+j] += D_scalar*(dbasis[0*numnodes+j]*dbasis[0*numnodes+i] + dbasis[1*numnodes+j]*dbasis[1*numnodes+i]);
+				}
+			}
 		}
 		else{
@@ -175,14 +171,9 @@
 				for(i=0;i<dim;i++)	normal[i]=0.;
 
-			for(row=0;row<dim;row++)
-				for(col=0;col<dim;col++)
-					if(row==col)
-						D[row*dim+col]=D_scalar*normal[row];
-					else
-						D[row*dim+col]=0.;
-			TripleMultiply(B,dim,numnodes,1,
-						D,dim,dim,0,
-						Bprime,dim,numnodes,0,
-						&Ke->values[0],1);
+			for(int i=0;i<numnodes;i++){
+				for(int j=0;j<numnodes;j++){
+					Ke->values[i*numnodes+j] += D_scalar*(normal[0]*dbasis[0*numnodes+j]*basis[i] + normal[1]*dbasis[1*numnodes+j]*basis[i]);
+				}
+			}
 
 			/* stabilization */
@@ -195,14 +186,10 @@
 				h=sqrt(pow(hx*normal[0],2) + pow(hy*normal[1],2));
 				kappa=h/2.+1.e-14; 
-				for(row=0;row<dim;row++)
-					for(col=0;col<dim;col++)
-						if(row==col)
-							D[row*dim+col]=D_scalar*kappa;
-						else
-							D[row*dim+col]=0.;
-				TripleMultiply(Bprime,dim,numnodes,1,
-							D,dim,dim,0,
-							Bprime,dim,numnodes,0,
-							&Ke->values[0],1);
+
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j] += D_scalar*kappa*(dbasis[0*numnodes+j]*dbasis[0*numnodes+i] + dbasis[1*numnodes+j]*dbasis[1*numnodes+i]);
+					}
+				}
 			}
 		}
@@ -211,9 +198,6 @@
 	/*Clean up and return*/
 	xDelete<IssmDouble>(xyz_list);
-	xDelete<IssmDouble>(B);
-	xDelete<IssmDouble>(Bprime);
-	xDelete<IssmDouble>(D);
-	xDelete<IssmDouble>(dlsf);
-	xDelete<IssmDouble>(normal);
+	xDelete<IssmDouble>(basis);
+	xDelete<IssmDouble>(dbasis);
 	delete gauss;
 	if(extrapolationcase==0){workelement->DeleteMaterials(); delete workelement;};
@@ -223,58 +207,4 @@
 ElementVector* ExtrapolationAnalysis::CreatePVector(Element* element){/*{{{*/
 	return NULL;
-
-}/*}}}*/
-void           ExtrapolationAnalysis::GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss, int dim){/*{{{*/
-	/*Compute B  matrix. B=[B1 B2 B3] where Bi is of size 3*NDOF2. 
-	 * For node i, Bi can be expressed in the actual coordinate system
-	 * by: 
-	 *       Bi=[ N ]
-	 *          [ N ]
-	 * where N is the finiteelement function for node i.
-	 *
-	 * We assume B_prog has been allocated already, of size: 2x(NDOF1*numnodes)
-	 */
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Get nodal functions*/
-	IssmDouble* basis=xNew<IssmDouble>(numnodes);
-	element->NodalFunctions(basis,gauss);
-
-	/*Build B: */
-	for(int i=0;i<numnodes;i++)
-		for(int d=0;d<dim;d++)
-			B[numnodes*d+i] = basis[i];
-
-	/*Clean-up*/
-	xDelete<IssmDouble>(basis);
-}/*}}}*/
-void           ExtrapolationAnalysis::GetBprime(IssmDouble* Bprime,Element* element,IssmDouble* xyz_list,Gauss* gauss, int dim){/*{{{*/
-	/*Compute B'  matrix. B'=[B1' B2' B3'] where Bi' is of size 3*NDOF2. 
-	 * For node i, Bi' can be expressed in the actual coordinate system
-	 * by: 
-	 *       Bi_prime=[ dN/dx ]
-	 *                [ dN/dy ]
-	 * where N is the finiteelement function for node i.
-	 *
-	 * We assume B' has been allocated already, of size: 3x(NDOF2*numnodes)
-	 */
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Get nodal functions derivatives*/
-	IssmDouble* dbasis=xNew<IssmDouble>(dim*numnodes);
-	element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
-
-	/*Build B': */
-	for(int i=0;i<numnodes;i++)
-		for(int d=0;d<dim;d++)
-			Bprime[numnodes*d+i] = dbasis[d*numnodes+i];
-
-	/*Clean-up*/
-	xDelete<IssmDouble>(dbasis);
-
 }/*}}}*/
 void           ExtrapolationAnalysis::GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element){/*{{{*/
@@ -304,5 +234,5 @@
 	}
 }/*}}}*/
-int				ExtrapolationAnalysis::GetExtrapolationCase(Element* element){/*{{{*/
+int            ExtrapolationAnalysis::GetExtrapolationCase(Element* element){/*{{{*/
 
 	/* Get case of extrapolation, depending on domain quality, and extrapolation variable */
@@ -338,6 +268,6 @@
 	element->FindParam(&extvar_enum, ExtrapolationVariableEnum);
 
-	Input* active_input=element->GetInput(IceMaskNodeActivationEnum); _assert_(active_input);
-	Input* extvar_input=element->GetInput(extvar_enum); _assert_(extvar_input);
+	Input2* active_input=element->GetInput2(IceMaskNodeActivationEnum); _assert_(active_input);
+	Input2* extvar_input=element->GetInput2(extvar_enum); _assert_(extvar_input);
 
 	Gauss* gauss=element->NewGauss();
Index: /issm/trunk/src/c/analyses/ExtrapolationAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/ExtrapolationAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/ExtrapolationAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 	void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 	int  DofsPerNode(int** doflist,int domaintype,int approximation);
-	void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+	void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 	void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
@@ -26,6 +26,4 @@
 	ElementMatrix* CreateKMatrix(Element* element);
 	ElementVector* CreatePVector(Element* element);
-	void           GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss, int dim);
-	void           GetBprime(IssmDouble* Bprime,Element* element,IssmDouble* xyz_list,Gauss* gauss, int dim);
 	void           GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element);
 	void           GradientJ(Vector<IssmDouble>* gradient,Element* element,int control_type,int control_index);
Index: /issm/trunk/src/c/analyses/ExtrudeFromBaseAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/ExtrudeFromBaseAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/ExtrudeFromBaseAnalysis.cpp	(revision 24686)
@@ -18,5 +18,5 @@
 	return 1;
 }/*}}}*/
-void ExtrudeFromBaseAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void ExtrudeFromBaseAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int counter=0;
@@ -24,5 +24,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -30,5 +30,5 @@
 
 	if(iomodel->domaintype==Domain2DverticalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
 	}
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/ExtrudeFromBaseAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/ExtrudeFromBaseAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/ExtrudeFromBaseAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/ExtrudeFromTopAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/ExtrudeFromTopAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/ExtrudeFromTopAnalysis.cpp	(revision 24686)
@@ -18,5 +18,5 @@
 	return 1;
 }/*}}}*/
-void ExtrudeFromTopAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void ExtrudeFromTopAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int counter=0;
@@ -24,5 +24,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -30,5 +30,5 @@
 
 	if(iomodel->domaintype==Domain2DverticalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
 	}
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/ExtrudeFromTopAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/ExtrudeFromTopAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/ExtrudeFromTopAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/FreeSurfaceBaseAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/FreeSurfaceBaseAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/FreeSurfaceBaseAnalysis.cpp	(revision 24686)
@@ -55,5 +55,5 @@
 	return 1;
 }/*}}}*/
-void FreeSurfaceBaseAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void FreeSurfaceBaseAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Now, is the model 3d? otherwise, do nothing: */
@@ -68,21 +68,21 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum,0.);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum,0.);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
 	if(iomodel->domaindim==3){
-		iomodel->FetchDataToInput(elements,"md.initialization.vz",VzEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vz",VzEnum);
 	}
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 
@@ -92,5 +92,5 @@
 	switch(basalforcing_model){
 		case FloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
 			break;
 		case LinearFloatingMeltRateEnum:
@@ -101,17 +101,18 @@
 			break;
 		case SpatialLinearFloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.deepwater_melting_rate",BasalforcingsDeepwaterMeltingRateEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.deepwater_elevation",BasalforcingsDeepwaterElevationEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.upperwater_elevation",BasalforcingsUpperwaterElevationEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.deepwater_melting_rate",BasalforcingsDeepwaterMeltingRateEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.deepwater_elevation",BasalforcingsDeepwaterElevationEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.upperwater_elevation",BasalforcingsUpperwaterElevationEnum);
 			break;
 		case BasalforcingsPicoEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.basin_id",BasalforcingsPicoBasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.basin_id",BasalforcingsPicoBasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.overturning_coeff",BasalforcingsPicoOverturningCoeffEnum);
 			break;
 		case BasalforcingsIsmip6Enum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.basin_id",BasalforcingsIsmip6BasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.basin_id",BasalforcingsIsmip6BasinIdEnum);
 			break;
 		case BeckmannGoosseFloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.ocean_salinity",BasalforcingsOceanSalinityEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.ocean_temp",BasalforcingsOceanTempEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.ocean_salinity",BasalforcingsOceanSalinityEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.ocean_temp",BasalforcingsOceanTempEnum);
 			break;
 		default:
@@ -176,7 +177,7 @@
 	basalelement->FindParam(&dt,TimesteppingTimeStepEnum);
 	basalelement->FindParam(&stabilization,MasstransportStabilizationEnum);
-	Input* vx_input=basalelement->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=NULL;
-	if(dim>1){vy_input = basalelement->GetInput(VyEnum); _assert_(vy_input);}
+	Input2* vx_input=basalelement->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=NULL;
+	if(dim>1){vy_input = basalelement->GetInput2(VyEnum); _assert_(vy_input);}
 	h = basalelement->CharacteristicLength();
 
@@ -295,12 +296,12 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->FindParam(&dt,TimesteppingTimeStepEnum);
-	Input* groundedice_input   = basalelement->GetInput(MaskGroundediceLevelsetEnum);              _assert_(groundedice_input);
-	Input* gmb_input           = basalelement->GetInput(BasalforcingsGroundediceMeltingRateEnum);  _assert_(gmb_input);
-	Input* fmb_input           = basalelement->GetInput(BasalforcingsFloatingiceMeltingRateEnum);  _assert_(fmb_input);
-	Input* base_input          = basalelement->GetInput(BaseEnum);                                 _assert_(base_input);
-	Input* vz_input      = NULL;
+	Input2* groundedice_input   = basalelement->GetInput2(MaskGroundediceLevelsetEnum);              _assert_(groundedice_input);
+	Input2* gmb_input           = basalelement->GetInput2(BasalforcingsGroundediceMeltingRateEnum);  _assert_(gmb_input);
+	Input2* fmb_input           = basalelement->GetInput2(BasalforcingsFloatingiceMeltingRateEnum);  _assert_(fmb_input);
+	Input2* base_input          = basalelement->GetInput2(BaseEnum);                                 _assert_(base_input);
+	Input2* vz_input      = NULL;
 	switch(dim){
-		case 1: vz_input = basalelement->GetInput(VyEnum); _assert_(vz_input); break;
-		case 2: vz_input = basalelement->GetInput(VzEnum); _assert_(vz_input); break;
+		case 1: vz_input = basalelement->GetInput2(VyEnum); _assert_(vz_input); break;
+		case 2: vz_input = basalelement->GetInput2(VzEnum); _assert_(vz_input); break;
 		default: _error_("not implemented");
 	}
@@ -410,7 +411,7 @@
 
 		int             numnodes = element->GetNumberOfNodes();
-		Input* groundedice_input = element->GetInput(MaskGroundediceLevelsetEnum);  _assert_(groundedice_input);
-		Input* onbase_input       = element->GetInput(MeshVertexonbaseEnum);          _assert_(onbase_input);
-		Input* base_input        = element->GetInput(BaseEnum);                     _assert_(base_input);
+		Input2* groundedice_input = element->GetInput2(MaskGroundediceLevelsetEnum);  _assert_(groundedice_input);
+		Input2* onbase_input       = element->GetInput2(MeshVertexonbaseEnum);          _assert_(onbase_input);
+		Input2* base_input        = element->GetInput2(BaseEnum);                     _assert_(base_input);
 
 		Gauss* gauss=element->NewGauss();
Index: /issm/trunk/src/c/analyses/FreeSurfaceBaseAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/FreeSurfaceBaseAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/FreeSurfaceBaseAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/FreeSurfaceTopAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/FreeSurfaceTopAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/FreeSurfaceTopAnalysis.cpp	(revision 24686)
@@ -55,5 +55,5 @@
 	return 1;
 }/*}}}*/
-void FreeSurfaceTopAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void FreeSurfaceTopAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Now, is the model 3d? otherwise, do nothing: */
@@ -71,23 +71,23 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
 	}
 	if(iomodel->domaindim==3){
-		iomodel->FetchDataToInput(elements,"md.initialization.vz",VzEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vz",VzEnum);
 	}
 	switch(smb_model){
 		case SMBforcingEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.mass_balance",SmbMassBalanceEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.mass_balance",SmbMassBalanceEnum,0.);
 			break;
 		default:
@@ -153,7 +153,7 @@
 	topelement->FindParam(&dt,TimesteppingTimeStepEnum);
 	topelement->FindParam(&stabilization,MasstransportStabilizationEnum);
-	Input* vx_input=topelement->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=NULL;
-	if(dim>1){vy_input = topelement->GetInput(VyEnum); _assert_(vy_input);}
+	Input2* vx_input=topelement->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=NULL;
+	if(dim>1){vy_input = topelement->GetInput2(VyEnum); _assert_(vy_input);}
 	h = topelement->CharacteristicLength();
 
@@ -211,4 +211,5 @@
 				vx_input->GetInputAverage(&vx);
 				vy_input->GetInputAverage(&vy);
+
 				D[0*dim+0]=h/2.0*fabs(vx);
 				D[1*dim+1]=h/2.0*fabs(vy);
@@ -272,10 +273,10 @@
 	topelement->GetVerticesCoordinates(&xyz_list);
 	topelement->FindParam(&dt,TimesteppingTimeStepEnum);
-	Input* ms_input      = topelement->GetInput(SmbMassBalanceEnum);  _assert_(ms_input);
-	Input* surface_input = topelement->GetInput(SurfaceEnum);                     _assert_(surface_input);
-	Input* vz_input      = NULL;
+	Input2* ms_input      = topelement->GetInput2(SmbMassBalanceEnum);  _assert_(ms_input);
+	Input2* surface_input = topelement->GetInput2(SurfaceEnum);                     _assert_(surface_input);
+	Input2* vz_input      = NULL;
 	switch(dim){
-		case 1: vz_input = topelement->GetInput(VyEnum); _assert_(vz_input); break;
-		case 2: vz_input = topelement->GetInput(VzEnum); _assert_(vz_input); break;
+		case 1: vz_input = topelement->GetInput2(VyEnum); _assert_(vz_input); break;
+		case 2: vz_input = topelement->GetInput2(VzEnum); _assert_(vz_input); break;
 		default: _error_("not implemented");
 	}
Index: /issm/trunk/src/c/analyses/FreeSurfaceTopAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/FreeSurfaceTopAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/FreeSurfaceTopAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/GLheightadvectionAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/GLheightadvectionAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/GLheightadvectionAnalysis.cpp	(revision 24686)
@@ -24,5 +24,5 @@
 	return 1;
 }/*}}}*/
-void GLheightadvectionAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void GLheightadvectionAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Update elements: */
@@ -31,5 +31,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -37,8 +37,8 @@
 
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	}
-	iomodel->FetchDataToInput(elements,"md.mesh.vertexonboundary",MeshVertexonboundaryEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	}
+	iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonboundary",MeshVertexonboundaryEnum);
 }/*}}}*/
 void GLheightadvectionAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -71,7 +71,7 @@
 	IssmDouble vel,vx,vy;
 	IssmDouble* xyz_list      = NULL;
-	Input* vx_input           = NULL;
-	Input* vy_input           = NULL;
-	Input* bc_input           = NULL;
+	Input2* vx_input           = NULL;
+	Input2* vy_input           = NULL;
+	Input2* bc_input           = NULL;
 
 	/*Get problem dimension*/
@@ -97,12 +97,12 @@
 	switch(domaintype){
 		case Domain2DhorizontalEnum:
-			vx_input=basalelement->GetInput(VxEnum); _assert_(vx_input);
-			vy_input=basalelement->GetInput(VyEnum); _assert_(vy_input);
-		   bc_input=basalelement->GetInput(MeshVertexonboundaryEnum); _assert_(bc_input);
+			vx_input=basalelement->GetInput2(VxEnum); _assert_(vx_input);
+			vy_input=basalelement->GetInput2(VyEnum); _assert_(vy_input);
+		   bc_input=basalelement->GetInput2(MeshVertexonboundaryEnum); _assert_(bc_input);
 		break;
 		case Domain3DEnum:
-			vx_input=basalelement->GetInput(VxAverageEnum); _assert_(vx_input);
-			vy_input=basalelement->GetInput(VyAverageEnum); _assert_(vy_input);
-			bc_input=basalelement->GetInput(MeshVertexonboundaryEnum); _assert_(bc_input);
+			vx_input=basalelement->GetInput2(VxAverageEnum); _assert_(vx_input);
+			vy_input=basalelement->GetInput2(VyAverageEnum); _assert_(vy_input);
+			bc_input=basalelement->GetInput2(MeshVertexonboundaryEnum); _assert_(bc_input);
 		break;
 		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
Index: /issm/trunk/src/c/analyses/GLheightadvectionAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/GLheightadvectionAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/GLheightadvectionAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/GiaIvinsAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/GiaIvinsAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/GiaIvinsAnalysis.cpp	(revision 24686)
@@ -18,5 +18,5 @@
 	return 1;
 }/*}}}*/
-void GiaIvinsAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void GiaIvinsAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Update elements: */
@@ -25,13 +25,13 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.gia.mantle_viscosity",GiaMantleViscosityEnum);
-	iomodel->FetchDataToInput(elements,"md.gia.lithosphere_thickness",GiaLithosphereThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.gia.mantle_viscosity",GiaMantleViscosityEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.gia.lithosphere_thickness",GiaLithosphereThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
 }/*}}}*/
 void GiaIvinsAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
Index: /issm/trunk/src/c/analyses/GiaIvinsAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/GiaIvinsAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/GiaIvinsAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.cpp	(revision 24686)
@@ -9,5 +9,4 @@
 	return 1;
 }/*}}}*/
-
 void HydrologyDCEfficientAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
 
@@ -35,6 +34,5 @@
 	parameters->AddObject(new IntParam(HydrologydcEplThickCompEnum,eplthickcomp));
 }/*}}}*/
-
-void HydrologyDCEfficientAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void HydrologyDCEfficientAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	bool   isefficientlayer;
@@ -54,21 +52,20 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.epl_head",EplHeadSubstepEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.sediment_head",SedimentHeadSubstepEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.epl_thickness",HydrologydcEplThicknessSubstepEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.basal_moulin_input",HydrologydcBasalMoulinInputEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.epl_head",EplHeadSubstepEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.sediment_head",SedimentHeadSubstepEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.epl_thickness",HydrologydcEplThicknessSubstepEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.basal_moulin_input",HydrologydcBasalMoulinInputEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	}
-}/*}}}*/
-
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	}
+}/*}}}*/
 void HydrologyDCEfficientAnalysis::CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr){/*{{{*/
 
@@ -89,5 +86,4 @@
 	iomodel->DeleteData(2,"md.mesh.vertexonbase","md.mesh.vertexonsurface");
 }/*}}}*/
-
 void HydrologyDCEfficientAnalysis::CreateConstraints(Constraints* constraints,IoModel* iomodel){/*{{{*/
 
@@ -104,5 +100,4 @@
 	IoModelToConstraintsx(constraints,iomodel,"md.hydrology.spcepl_head",HydrologyDCEfficientAnalysisEnum,P1Enum);
 }/*}}}*/
-
 void HydrologyDCEfficientAnalysis::CreateLoads(Loads* loads, IoModel* iomodel){/*{{{*/
 
@@ -139,5 +134,4 @@
 	iomodel->DeleteData(1,"md.mesh.vertexonbase");
 }/*}}}*/
-
 void HydrologyDCEfficientAnalysis::InitZigZagCounter(FemModel* femmodel){/*{{{*/
 
@@ -147,5 +141,4 @@
 	xDelete<int>(eplzigzag_counter);
 }/*}}}*/
-
 void HydrologyDCEfficientAnalysis::ResetCounter(FemModel* femmodel){/*{{{*/
 
@@ -163,14 +156,11 @@
 	_error_("not implemented");
 }/*}}}*/
-
 ElementVector* HydrologyDCEfficientAnalysis::CreateDVector(Element* element){/*{{{*/
 	/*Default, return NULL*/
 	return NULL;
 }/*}}}*/
-
 ElementMatrix* HydrologyDCEfficientAnalysis::CreateJacobianMatrix(Element* element){/*{{{*/
 _error_("Not implemented");
 }/*}}}*/
-
 ElementMatrix* HydrologyDCEfficientAnalysis::CreateKMatrix(Element* element){/*{{{*/
 
@@ -193,6 +183,5 @@
 	}
 
-	Input* active_element_input = basalelement->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
-	active_element_input->GetInputValue(&active_element);
+	basalelement->GetInputValue(&active_element,HydrologydcMaskEplactiveEltEnum);
 
 	/*Check that all nodes are active, else return empty matrix*/
@@ -224,8 +213,7 @@
 	basalelement->GetVerticesCoordinates(&xyz_list);
 	basalelement->FindParam(&dt,TimesteppingTimeStepEnum);
-
-	Input* epl_thick_input = basalelement->GetInput(HydrologydcEplThicknessSubstepEnum); _assert_(epl_thick_input);
-	Input* epl_head_input	= basalelement->GetInput(EplHeadSubstepEnum);  _assert_(epl_head_input);
-	Input* base_input			= basalelement->GetInput(BaseEnum); _assert_(base_input);
+	Input2* epl_thick_input = basalelement->GetInput2(HydrologydcEplThicknessSubstepEnum); _assert_(epl_thick_input);
+	Input2* epl_head_input  = basalelement->GetInput2(EplHeadSubstepEnum);  _assert_(epl_head_input);
+	Input2* base_input      = basalelement->GetInput2(BaseEnum); _assert_(base_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -279,5 +267,4 @@
 	return Ke;
 }/*}}}*/
-
 ElementVector* HydrologyDCEfficientAnalysis::CreatePVector(Element* element){/*{{{*/
 
@@ -300,6 +287,5 @@
 	}
 
-	Input* active_element_input = basalelement->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
-	active_element_input->GetInputValue(&active_element);
+	basalelement->GetInputValue(&active_element,HydrologydcMaskEplactiveEltEnum);
 
 	/*Check that all nodes are active, else return empty matrix*/
@@ -319,7 +305,7 @@
 	IssmDouble residual,connectivity;
 
-	IssmDouble		*xyz_list				 = NULL;
-	Input*			 old_wh_input			 = NULL;
-	Input*			 surface_runoff_input = NULL;
+	IssmDouble *xyz_list             = NULL;
+	Input2     *old_wh_input         = NULL;
+	Input2     *surface_runoff_input = NULL;
 
 	/*Fetch number of nodes and dof for this finite element*/
@@ -336,16 +322,16 @@
 	basalelement ->FindParam(&smb_model,SmbEnum);
 
-	Input*	epl_thick_input			 = basalelement->GetInput(HydrologydcEplThicknessSubstepEnum); _assert_(epl_thick_input);
-	Input*	sed_head_input			 = basalelement->GetInput(SedimentHeadSubstepEnum); _assert_(sed_head_input);
-	Input*	epl_head_input			 = basalelement->GetInput(EplHeadSubstepEnum); _assert_(epl_head_input);
-	Input*	basal_melt_input		 = basalelement->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(basal_melt_input);
-	Input*	residual_input			 = basalelement->GetInput(SedimentHeadResidualEnum); _assert_(residual_input);
-	Input*	base_input					 = basalelement->GetInput(BaseEnum); _assert_(base_input);
+	Input2*	epl_thick_input  = basalelement->GetInput2(HydrologydcEplThicknessSubstepEnum); _assert_(epl_thick_input);
+	Input2*	sed_head_input   = basalelement->GetInput2(SedimentHeadSubstepEnum); _assert_(sed_head_input);
+	Input2*	epl_head_input   = basalelement->GetInput2(EplHeadSubstepEnum); _assert_(epl_head_input);
+	Input2*	basal_melt_input = basalelement->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(basal_melt_input);
+	Input2*	residual_input   = basalelement->GetInput2(SedimentHeadResidualEnum); _assert_(residual_input);
+	Input2*	base_input       = basalelement->GetInput2(BaseEnum); _assert_(base_input);
 
 	if(dt!= 0.){
-		old_wh_input = basalelement->GetInput(EplHeadOldEnum);            _assert_(old_wh_input);
+		old_wh_input = basalelement->GetInput2(EplHeadOldEnum);            _assert_(old_wh_input);
 	}
 	if(smb_model==SMBgradientscomponentsEnum){
-		surface_runoff_input = basalelement->GetInput(SmbRunoffEnum); _assert_(surface_runoff_input);
+		surface_runoff_input = basalelement->GetInput2(SmbRunoffEnum); _assert_(surface_runoff_input);
 	}
 
@@ -397,13 +383,37 @@
 	return pe;
 }/*}}}*/
-
+void HydrologyDCEfficientAnalysis::GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss){/*{{{*/
+	/*Compute B  matrix. B=[B1 B2 B3] where Bi is of size 3*NDOF2.
+	 * For node i, Bi can be expressed in the actual coordinate system
+	 * by:
+	 *       Bi=[ dN/dx ]
+	 *          [ dN/dy ]
+	 * where N is the finiteelement function for node i.
+	 *
+	 * We assume B has been allocated already, of size: 3x(NDOF2*numnodes)
+	 */
+
+	/*Fetch number of nodes for this finite element*/
+	int numnodes = element->GetNumberOfNodes();
+
+	/*Get nodal functions derivatives*/
+	IssmDouble* dbasis=xNew<IssmDouble>(2*numnodes);
+	element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
+
+	/*Build B: */
+	for(int i=0;i<numnodes;i++){
+		B[numnodes*0+i] = dbasis[0*numnodes+i];
+		B[numnodes*1+i] = dbasis[1*numnodes+i];
+	}
+
+	/*Clean-up*/
+	xDelete<IssmDouble>(dbasis);
+}/*}}}*/
 void HydrologyDCEfficientAnalysis::GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element){/*{{{*/
 	element->GetSolutionFromInputsOneDof(solution,EplHeadSubstepEnum);
 }/*}}}*/
-
 void HydrologyDCEfficientAnalysis::GradientJ(Vector<IssmDouble>* gradient,Element* element,int control_type,int control_index){/*{{{*/
 	_error_("Not implemented yet");
 }/*}}}*/
-
 void HydrologyDCEfficientAnalysis::InputUpdateFromSolution(IssmDouble* solution,Element* element){/*{{{*/
 	/*Intermediaries*/
@@ -441,5 +451,6 @@
 	}
 	/*Add input to the element: */
-	element->AddBasalInput(EplHeadSubstepEnum,eplHeads,P1Enum);
+	element->AddBasalInput2(EplHeadSubstepEnum,eplHeads,P1Enum);
+
 	/*Free ressources:*/
 	xDelete<IssmDouble>(eplHeads);
@@ -447,5 +458,4 @@
 	if(domaintype!=Domain2DhorizontalEnum){basalelement->DeleteMaterials(); delete basalelement;};
 } /*}}}*/
-
 void HydrologyDCEfficientAnalysis::UpdateConstraints(FemModel* femmodel){/*{{{*/
 	/*Default, do nothing*/
@@ -454,4 +464,84 @@
 
 /*Intermediaries*/
+IssmDouble HydrologyDCEfficientAnalysis::EplStoring(Element* element,Gauss* gauss, Input2* epl_thick_input, Input2* epl_head_input, Input2* base_input){/*{{{*/
+	IssmDouble epl_storing;
+	IssmDouble water_sheet,storing;
+	IssmDouble epl_thickness,prestep_head,base_elev;
+	IssmDouble rho_freshwater        = element->FindParam(MaterialsRhoFreshwaterEnum);
+	IssmDouble g                     = element->FindParam(ConstantsGEnum);
+	IssmDouble epl_porosity					 = element->FindParam(HydrologydcEplPorosityEnum);
+	IssmDouble epl_compressibility	 = element->FindParam(HydrologydcEplCompressibilityEnum);
+	IssmDouble water_compressibility = element->FindParam(HydrologydcWaterCompressibilityEnum);
+
+	epl_thick_input->GetInputValue(&epl_thickness,gauss);
+	epl_head_input->GetInputValue(&prestep_head,gauss);
+	base_input->GetInputValue(&base_elev,gauss);
+	water_sheet=max(0.0,(prestep_head-base_elev));
+	storing=rho_freshwater*g*epl_porosity*epl_thickness*(water_compressibility+(epl_compressibility/epl_porosity));
+
+	/* //porosity for unconfined region */
+	/* if (water_sheet<=0.9*epl_thickness){ */
+	/* 	epl_storing=epl_porosity; */
+	/* } */
+	/* //continuity ramp */
+	/* else if((water_sheet<epl_thickness) && (water_sheet>0.9*epl_thickness)){ */
+	/* 	epl_storing=(epl_thickness-water_sheet)*(epl_porosity-storing)/(0.1*epl_thickness)+storing; */
+	/* } */
+	/* //storing coefficient for confined */
+	/* else{ */
+	/* 	epl_storing=storing; */
+	/* } */
+ 	/* return epl_storing; */
+	return storing;
+}/*}}}*/
+IssmDouble HydrologyDCEfficientAnalysis::EplTransmitivity(Element* element,Gauss* gauss, Input2* epl_thick_input, Input2* epl_head_input, Input2* base_input){/*{{{*/
+	IssmDouble epl_transmitivity;
+	IssmDouble water_sheet;
+	IssmDouble epl_thickness,base_elev,prestep_head;
+	IssmDouble epl_conductivity      = element->FindParam(HydrologydcEplConductivityEnum);
+	epl_thick_input->GetInputValue(&epl_thickness,gauss);
+	epl_head_input->GetInputValue(&prestep_head,gauss);
+	base_input->GetInputValue(&base_elev,gauss);
+
+	water_sheet=max(0.0,(prestep_head-base_elev));
+	epl_transmitivity=epl_conductivity*epl_thickness;
+	//epl_transmitivity=max(1.0e-6,(epl_conductivity*min(water_sheet,epl_thickness)));
+	return epl_transmitivity;
+}/*}}}*/
+void HydrologyDCEfficientAnalysis::GetHydrologyDCInefficientHmax(IssmDouble* ph_max,Element* element, Node* innode){/*{{{*/
+
+	int        hmax_flag;
+	IssmDouble h_max;
+	IssmDouble rho_ice,rho_water;
+	IssmDouble thickness,bed;
+	/*Get the flag to the limitation method*/
+	element->FindParam(&hmax_flag,HydrologydcSedimentlimitFlagEnum);
+
+	/*Switch between the different cases*/
+	switch(hmax_flag){
+	case 0:
+		h_max=1.0e+10;
+		break;
+	case 1:
+		element->FindParam(&h_max,HydrologydcSedimentlimitEnum);
+		break;
+	case 2:
+		/*Compute max*/
+		rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
+		rho_ice   = element->FindParam(MaterialsRhoIceEnum);
+		element-> GetInputValue(&thickness,innode,ThicknessEnum);
+		element-> GetInputValue(&bed,innode,BaseEnum);
+		h_max=((rho_ice*thickness)/rho_water)+bed;
+		break;
+	case 3:
+		_error_("Using normal stress  not supported yet");
+		break;
+	default:
+		_error_("no case higher than 3 for SedimentlimitFlag");
+	}
+	/*Assign output pointer*/
+	*ph_max=h_max;
+}
+/*}}}*/
 IssmDouble HydrologyDCEfficientAnalysis::GetHydrologyKMatrixTransfer(Element* element){/*{{{*/
 
@@ -475,6 +565,5 @@
 	return transfer;
 }/*}}}*/
-
-IssmDouble HydrologyDCEfficientAnalysis::GetHydrologyPVectorTransfer(Element* element, Gauss* gauss, Input* sed_head_input){/*{{{*/
+IssmDouble HydrologyDCEfficientAnalysis::GetHydrologyPVectorTransfer(Element* element, Gauss* gauss, Input2* sed_head_input){/*{{{*/
 
 	int transfermethod;
@@ -501,5 +590,4 @@
 	return transfer;
 }/*}}}*/
-
 void HydrologyDCEfficientAnalysis::ComputeEPLThickness(FemModel* femmodel){/*{{{*/
 
@@ -534,6 +622,5 @@
 		IssmDouble* bed           = xNew<IssmDouble>(numnodes);
 
-		Input* 	active_element_input=element->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
-		active_element_input->GetInputValue(&active_element);
+		element->GetInputValue(&active_element,HydrologydcMaskEplactiveEltEnum);
 		element->FindParam(&dt,TimesteppingTimeStepEnum);
 
@@ -590,5 +677,5 @@
 			}
 		}
-		element->AddInput(HydrologydcEplThicknessSubstepEnum,thickness,element->GetElementType());
+		element->AddInput2(HydrologydcEplThicknessSubstepEnum,thickness,element->GetElementType());
 		xDelete<IssmDouble>(thickness);
 		xDelete<IssmDouble>(eplhead);
@@ -602,34 +689,5 @@
 	}
 }/*}}}*/
-
-void HydrologyDCEfficientAnalysis::GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss){/*{{{*/
-	/*Compute B  matrix. B=[B1 B2 B3] where Bi is of size 3*NDOF2.
-	 * For node i, Bi can be expressed in the actual coordinate system
-	 * by:
-	 *       Bi=[ dN/dx ]
-	 *          [ dN/dy ]
-	 * where N is the finiteelement function for node i.
-	 *
-	 * We assume B has been allocated already, of size: 3x(NDOF2*numnodes)
-	 */
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Get nodal functions derivatives*/
-	IssmDouble* dbasis=xNew<IssmDouble>(2*numnodes);
-	element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
-
-	/*Build B: */
-	for(int i=0;i<numnodes;i++){
-		B[numnodes*0+i] = dbasis[0*numnodes+i];
-		B[numnodes*1+i] = dbasis[1*numnodes+i];
-	}
-
-	/*Clean-up*/
-	xDelete<IssmDouble>(dbasis);
-}/*}}}*/
-
-void  HydrologyDCEfficientAnalysis::HydrologyEPLGetMask(Vector<IssmDouble>* vec_mask, Vector<IssmDouble>* recurence, Element* element){
+void  HydrologyDCEfficientAnalysis::HydrologyEPLGetMask(Vector<IssmDouble>* vec_mask, Vector<IssmDouble>* recurence, Element* element){/*{{{*/
 
 	bool        active_element;
@@ -663,6 +721,5 @@
 	IssmDouble colapse_thick =basalelement->FindParam(HydrologydcEplColapseThicknessEnum);
 
-	Input* active_element_input=basalelement->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
-	active_element_input->GetInputValue(&active_element);
+	basalelement->GetInputValue(&active_element,HydrologydcMaskEplactiveEltEnum);
 
 	basalelement-> GetInputListOnVertices(&old_active[0],HydrologydcMaskEplactiveNodeEnum);
@@ -712,5 +769,5 @@
 		}
 	}
-	basalelement->AddInput(HydrologydcEplThicknessSubstepEnum,epl_thickness,basalelement->GetElementType());
+	element->AddBasalInput2(HydrologydcEplThicknessSubstepEnum,epl_thickness,basalelement->GetElementType());
 
 	if(domaintype!=Domain2DhorizontalEnum){
@@ -726,51 +783,4 @@
 }
 /*}}}*/
-IssmDouble HydrologyDCEfficientAnalysis::EplStoring(Element* element,Gauss* gauss, Input* epl_thick_input, Input* epl_head_input, Input* base_input){/*{{{*/
-	IssmDouble epl_storing;
-	IssmDouble water_sheet,storing;
-	IssmDouble epl_thickness,prestep_head,base_elev;
-	IssmDouble rho_freshwater        = element->FindParam(MaterialsRhoFreshwaterEnum);
-	IssmDouble g                     = element->FindParam(ConstantsGEnum);
-	IssmDouble epl_porosity					 = element->FindParam(HydrologydcEplPorosityEnum);
-	IssmDouble epl_compressibility	 = element->FindParam(HydrologydcEplCompressibilityEnum);
-	IssmDouble water_compressibility = element->FindParam(HydrologydcWaterCompressibilityEnum);
-
-	epl_thick_input->GetInputValue(&epl_thickness,gauss);
-	epl_head_input->GetInputValue(&prestep_head,gauss);
-	base_input->GetInputValue(&base_elev,gauss);
-	water_sheet=max(0.0,(prestep_head-base_elev));
-	storing=rho_freshwater*g*epl_porosity*epl_thickness*(water_compressibility+(epl_compressibility/epl_porosity));
-
-	/* //porosity for unconfined region */
-	/* if (water_sheet<=0.9*epl_thickness){ */
-	/* 	epl_storing=epl_porosity; */
-	/* } */
-	/* //continuity ramp */
-	/* else if((water_sheet<epl_thickness) && (water_sheet>0.9*epl_thickness)){ */
-	/* 	epl_storing=(epl_thickness-water_sheet)*(epl_porosity-storing)/(0.1*epl_thickness)+storing; */
-	/* } */
-	/* //storing coefficient for confined */
-	/* else{ */
-	/* 	epl_storing=storing; */
-	/* } */
- 	/* return epl_storing; */
-	return storing;
-}/*}}}*/
-
-IssmDouble HydrologyDCEfficientAnalysis::EplTransmitivity(Element* element,Gauss* gauss, Input* epl_thick_input, Input* epl_head_input, Input* base_input){/*{{{*/
-	IssmDouble epl_transmitivity;
-	IssmDouble water_sheet;
-	IssmDouble epl_thickness,base_elev,prestep_head;
-	IssmDouble epl_conductivity      = element->FindParam(HydrologydcEplConductivityEnum);
-	epl_thick_input->GetInputValue(&epl_thickness,gauss);
-	epl_head_input->GetInputValue(&prestep_head,gauss);
-	base_input->GetInputValue(&base_elev,gauss);
-
-	water_sheet=max(0.0,(prestep_head-base_elev));
-	epl_transmitivity=epl_conductivity*epl_thickness;
-	//epl_transmitivity=max(1.0e-6,(epl_conductivity*min(water_sheet,epl_thickness)));
-	return epl_transmitivity;
-}/*}}}*/
-
 void HydrologyDCEfficientAnalysis::HydrologyEPLGetActive(Vector<IssmDouble>* active_vec, Element* element){/*{{{*/
 	/*Constants*/
@@ -798,6 +808,5 @@
 	/*Pass the activity mask from elements to nodes*/
 	basalelement->GetInputListOnVertices(&active[0],HydrologydcMaskEplactiveNodeEnum);
-	Input* 	active_element_input=basalelement->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
-	active_element_input->GetInputValue(&active_element);
+	basalelement->GetInputValue(&active_element,HydrologydcMaskEplactiveEltEnum);
 
 	for(int i=0;i<numnodes;i++) flag+=active[i];
@@ -822,38 +831,2 @@
 }
 /*}}}*/
-
-void HydrologyDCEfficientAnalysis::GetHydrologyDCInefficientHmax(IssmDouble* ph_max,Element* element, Node* innode){/*{{{*/
-
-	int        hmax_flag;
-	IssmDouble h_max;
-	IssmDouble rho_ice,rho_water;
-	IssmDouble thickness,bed;
-	/*Get the flag to the limitation method*/
-	element->FindParam(&hmax_flag,HydrologydcSedimentlimitFlagEnum);
-
-	/*Switch between the different cases*/
-	switch(hmax_flag){
-	case 0:
-		h_max=1.0e+10;
-		break;
-	case 1:
-		element->FindParam(&h_max,HydrologydcSedimentlimitEnum);
-		break;
-	case 2:
-		/*Compute max*/
-		rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
-		rho_ice   = element->FindParam(MaterialsRhoIceEnum);
-		element-> GetInputValue(&thickness,innode,ThicknessEnum);
-		element-> GetInputValue(&bed,innode,BaseEnum);
-		h_max=((rho_ice*thickness)/rho_water)+bed;
-		break;
-	case 3:
-		_error_("Using normal stress  not supported yet");
-		break;
-	default:
-		_error_("no case higher than 3 for SedimentlimitFlag");
-	}
-	/*Assign output pointer*/
-	*ph_max=h_max;
-}
-/*}}}*/
Index: /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyDCEfficientAnalysis.h	(revision 24686)
@@ -9,5 +9,5 @@
 #include "./Analysis.h"
 class Node;
-class Input;
+class Input2;
 class HydrologyDCEfficientAnalysis: public Analysis{
 
@@ -16,5 +16,5 @@
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		void CreateConstraints(Constraints* constraints,IoModel* iomodel);
@@ -29,4 +29,5 @@
 		ElementMatrix* CreateKMatrix(Element* element);
 		ElementVector* CreatePVector(Element* element);
+		void GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss);
 		void GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element);
 		void GradientJ(Vector<IssmDouble>* gradient,Element* element,int control_type,int control_index);
@@ -35,13 +36,12 @@
 
 		/*Intermediaries*/
-		void GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss);
+		IssmDouble EplStoring(Element* element,Gauss* gauss, Input2* epl_thick_input, Input2* epl_head_input, Input2* base_input);
+		IssmDouble EplTransmitivity(Element* element,Gauss* gauss, Input2* epl_thick_input, Input2* epl_head_input, Input2* base_input);
+		void GetHydrologyDCInefficientHmax(IssmDouble* ph_max,Element* element, Node* innode);
 		IssmDouble GetHydrologyKMatrixTransfer(Element* element);
-		IssmDouble GetHydrologyPVectorTransfer(Element* element, Gauss* gauss, Input* sed_head_input);
-		IssmDouble EplStoring(Element* element,Gauss* gauss, Input* epl_thick_input, Input* epl_head_input, Input* base_input);
-		IssmDouble EplTransmitivity(Element* element,Gauss* gauss, Input* epl_thick_input, Input* epl_head_input, Input* base_input);
+		IssmDouble GetHydrologyPVectorTransfer(Element* element, Gauss* gauss, Input2* sed_head_input);
+		void ComputeEPLThickness(FemModel* femmodel);
 		void HydrologyEPLGetMask(Vector<IssmDouble>* vec_mask, Vector<IssmDouble>* recurence, Element* element);
 		void HydrologyEPLGetActive(Vector<IssmDouble>* active_vec, Element* element);
-		void GetHydrologyDCInefficientHmax(IssmDouble* ph_max,Element* element, Node* innode);
-		void ComputeEPLThickness(FemModel* femmodel);
 };
 #endif
Index: /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.cpp	(revision 24686)
@@ -5,4 +5,5 @@
 #include "../shared/shared.h"
 #include "../modules/modules.h"
+#include "../classes/Inputs2/TransientInput2.h"
 
 /*Model processing*/
@@ -74,5 +75,5 @@
   iomodel->DeleteData(&requestedoutputs,numoutputs,"md.hydrology.requested_outputs");
 }/*}}}*/
-void HydrologyDCInefficientAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void HydrologyDCInefficientAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	bool   isefficientlayer;
@@ -93,23 +94,23 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.basal_moulin_input",HydrologydcBasalMoulinInputEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.sediment_head",SedimentHeadSubstepEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.sediment_transmitivity",HydrologydcSedimentTransmitivityEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.mask_thawed_node",HydrologydcMaskThawedNodeEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.basal_moulin_input",HydrologydcBasalMoulinInputEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.sediment_head",SedimentHeadSubstepEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.sediment_transmitivity",HydrologydcSedimentTransmitivityEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.mask_thawed_node",HydrologydcMaskThawedNodeEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 	if(isefficientlayer){
-		iomodel->FetchDataToInput(elements,"md.hydrology.mask_eplactive_node",HydrologydcMaskEplactiveNodeEnum);
-		iomodel->FetchDataToInput(elements,"md.initialization.epl_head",EplHeadSubstepEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.mask_eplactive_node",HydrologydcMaskEplactiveNodeEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.initialization.epl_head",EplHeadSubstepEnum);
 	}
 
@@ -200,6 +201,5 @@
 	}
 
-	Input* thawed_element_input = basalelement->GetInput(HydrologydcMaskThawedEltEnum); _assert_(thawed_element_input);
-	thawed_element_input->GetInputValue(&thawed_element);
+	basalelement->GetInput2Value(&thawed_element,HydrologydcMaskThawedEltEnum);
 
 	/*Check that all nodes are active, else return empty matrix*/
@@ -219,7 +219,4 @@
 	IssmDouble *xyz_list  = NULL;
 
-	/*Define transfer related variables*/
-	Input* active_element_input =NULL;
-
 	/*Fetch number of nodes and dof for this finite element*/
 	int numnodes = basalelement->GetNumberOfNodes();
@@ -235,12 +232,12 @@
 	basalelement ->FindParam(&dt,TimesteppingTimeStepEnum);
 	basalelement ->FindParam(&isefficientlayer,HydrologydcIsefficientlayerEnum);
-	Input* SedTrans_input = basalelement->GetInput(HydrologydcSedimentTransmitivityEnum); _assert_(SedTrans_input);
-	Input* sed_head_input = basalelement->GetInput(SedimentHeadSubstepEnum);
-	Input* base_input     = basalelement->GetInput(BaseEnum);
-	Input* old_wh_input = basalelement->GetInput(SedimentHeadOldEnum);                  _assert_(old_wh_input);
+	Input2* SedTrans_input = basalelement->GetInput2(HydrologydcSedimentTransmitivityEnum); _assert_(SedTrans_input);
+	Input2* sed_head_input = basalelement->GetInput2(SedimentHeadSubstepEnum);
+	Input2* base_input     = basalelement->GetInput2(BaseEnum);
+	Input2* old_wh_input   = basalelement->GetInput2(SedimentHeadOldEnum);                  _assert_(old_wh_input);
 
 	/*Transfer related Inputs*/
 	if(isefficientlayer){
-		active_element_input = basalelement->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
+		basalelement->GetInput2Value(&active_element,HydrologydcMaskEplactiveEltEnum);
 	}
 
@@ -279,5 +276,4 @@
 			/*Transfer EPL part*/
 			if(isefficientlayer){
-				active_element_input->GetInputValue(&active_element);
 				if(active_element){
 					transfer=GetHydrologyKMatrixTransfer(basalelement);
@@ -324,6 +320,5 @@
 	}
 
-	Input* thawed_element_input = basalelement->GetInput(HydrologydcMaskThawedEltEnum); _assert_(thawed_element_input);
-	thawed_element_input->GetInputValue(&thawed_element);
+	basalelement->GetInput2Value(&thawed_element,HydrologydcMaskThawedEltEnum);
 
 	/*Check that all nodes are active, else return empty matrix*/
@@ -340,4 +335,5 @@
 	int        smb_model;
 	int        smbsubstepping;
+	int        hydrologysubstepping;
 	IssmDouble dt,scalar,sediment_storing;
 	IssmDouble water_head,sediment_transmitivity;
@@ -346,8 +342,8 @@
 
 	IssmDouble *xyz_list             = NULL;
-	Input*      active_element_input = NULL;
-	Input*      old_wh_input         = NULL;
-	Input*      dummy_input          = NULL;
-	TransientInput*  surface_runoff_input          = NULL;
+	Input2     *active_element_input = NULL;
+	Input2     *old_wh_input         = NULL;
+	Input2     *dummy_input          = NULL;
+	Input2     *surface_runoff_input = NULL;
 
 	/*Fetch number of nodes and dof for this finite element*/
@@ -364,9 +360,9 @@
 	basalelement->FindParam(&smb_model,SmbEnum);
 
-	Input*	sed_head_input			 = basalelement->GetInput(SedimentHeadSubstepEnum);
-	Input*	epl_head_input			 = basalelement->GetInput(EplHeadSubstepEnum);
-	Input*	base_input				 = basalelement->GetInput(BaseEnum);
-	Input*	basal_melt_input		 = basalelement->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(basal_melt_input);
-	Input*	SedTrans_input			 = basalelement->GetInput(HydrologydcSedimentTransmitivityEnum); _assert_(SedTrans_input);
+	Input2*	sed_head_input   = basalelement->GetInput2(SedimentHeadSubstepEnum);
+	Input2*	epl_head_input   = basalelement->GetInput2(EplHeadSubstepEnum);
+	Input2*	base_input       = basalelement->GetInput2(BaseEnum);
+	Input2*	basal_melt_input = basalelement->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(basal_melt_input);
+	Input2*	SedTrans_input   = basalelement->GetInput2(HydrologydcSedimentTransmitivityEnum); _assert_(SedTrans_input);
 
 	IssmDouble time;
@@ -375,20 +371,25 @@
 
 	if(dt!= 0.){
-		old_wh_input = basalelement->GetInput(SedimentHeadOldEnum);                  _assert_(old_wh_input);
+		old_wh_input = basalelement->GetInput2(SedimentHeadOldEnum); _assert_(old_wh_input);
 	}
 	if(smb_model==SMBgradientscomponentsEnum){
 		basalelement->FindParam(&smbsubstepping,SmbStepsPerStepEnum);
-		if(smbsubstepping>1) {
-			dummy_input = basalelement->GetInput(SmbRunoffTransientEnum); _assert_(dummy_input);
-		}
-		else {
-			dummy_input = basalelement->GetInput(SmbRunoffEnum); _assert_(dummy_input);
-		}
-		surface_runoff_input=xDynamicCast<TransientInput*>(dummy_input); _assert_(surface_runoff_input);
+		basalelement->FindParam(&hydrologysubstepping,HydrologyStepsPerStepEnum);
+
+		if(smbsubstepping==1){
+			dummy_input = basalelement->GetInput2(SmbRunoffEnum); _assert_(dummy_input);
+		}
+		else if(smbsubstepping>1 && smbsubstepping<=hydrologysubstepping){
+			dummy_input = basalelement->GetInput2(SmbRunoffTransientEnum, time); _assert_(dummy_input);
+		}
+		else{
+			dummy_input = basalelement->GetInput2(SmbRunoffTransientEnum,time-dt,time); _assert_(dummy_input);
+		}
+		surface_runoff_input=xDynamicCast<Input2*>(dummy_input); _assert_(surface_runoff_input);
 	}
 
 	/*Transfer related Inputs*/
 	if(isefficientlayer){
-		active_element_input = basalelement->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
+		basalelement->GetInput2Value(&active_element,HydrologydcMaskEplactiveEltEnum);
 	}
 
@@ -419,5 +420,4 @@
 		else{
 			/*if EPL is present and active input is there not here*/
-			active_element_input->GetInputValue(&active_element);
 			if(!active_element){
 				basal_melt_input->GetInputValue(&water_load,gauss);
@@ -439,5 +439,4 @@
 			if(isefficientlayer){
 				/*Dealing with the sediment part of the transfer term*/
-				active_element_input->GetInputValue(&active_element);
 				if(active_element){
 					transfer=GetHydrologyPVectorTransfer(basalelement,gauss,epl_head_input);
@@ -575,7 +574,7 @@
 
 	/*Add input to the element: */
-	element->AddBasalInput(SedimentHeadSubstepEnum,values,P1Enum);
-	element->AddBasalInput(EffectivePressureSubstepEnum,pressure,P1Enum);
-	element->AddBasalInput(SedimentHeadResidualEnum,residual,P1Enum);
+	element->AddBasalInput2(SedimentHeadSubstepEnum,values,P1Enum);
+	element->AddBasalInput2(EffectivePressureSubstepEnum,pressure,P1Enum);
+	element->AddBasalInput2(SedimentHeadResidualEnum,residual,P1Enum);
 
 	/*Free ressources:*/
@@ -593,5 +592,6 @@
 	return;
 }/*}}}*/
-IssmDouble HydrologyDCInefficientAnalysis::SedimentStoring(Element* element,Gauss* gauss,Input* sed_head_input, Input* base_input){/*{{{*/
+/*Intermediaries*/
+IssmDouble HydrologyDCInefficientAnalysis::SedimentStoring(Element* element,Gauss* gauss,Input2* sed_head_input, Input2* base_input){/*{{{*/
 	int unconf_scheme;
 	IssmDouble expfac;
@@ -632,5 +632,5 @@
 	return sediment_storing;
 }/*}}}*/
-IssmDouble HydrologyDCInefficientAnalysis::SedimentTransmitivity(Element* element,Gauss* gauss,Input* sed_head_input, Input* base_input,Input* SedTrans_input){/*{{{*/
+IssmDouble HydrologyDCInefficientAnalysis::SedimentTransmitivity(Element* element,Gauss* gauss,Input2* sed_head_input, Input2* base_input,Input2* SedTrans_input){/*{{{*/
 	int unconf_scheme;
 	IssmDouble ratio,expfac;
@@ -722,5 +722,5 @@
 	return transfer;
 }/*}}}*/
-IssmDouble HydrologyDCInefficientAnalysis::GetHydrologyPVectorTransfer(Element* element, Gauss* gauss, Input* epl_head_input){/*{{{*/
+IssmDouble HydrologyDCInefficientAnalysis::GetHydrologyPVectorTransfer(Element* element, Gauss* gauss, Input2* epl_head_input){/*{{{*/
 
 	int transfermethod;
@@ -753,7 +753,7 @@
 	for(int i=0;i<femmodel->elements->Size();i++){
 		element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
-			Input* node_mask_input = element->GetInput(HydrologydcMaskEplactiveNodeEnum); _assert_(node_mask_input);
-
-		if(node_mask_input->Max()>0.){
+
+		Input2* input=element->GetInput2(HydrologydcMaskEplactiveNodeEnum); _assert_(input);
+		if(input->GetInputMax()>0.){
 			element_active = true;
 		}
@@ -761,5 +761,5 @@
 			element_active = false;
 		}
-		element->AddInput(new BoolInput(HydrologydcMaskEplactiveEltEnum,element_active));
+		element->SetBoolInput(element->inputs2,HydrologydcMaskEplactiveEltEnum,element_active);
 	}
 }/*}}}*/
@@ -782,5 +782,5 @@
 	}
 	/*Intermediaries*/
-	int						numnodes    =	basalelement->GetNumberOfNodes();
+	int				numnodes    =	basalelement->GetNumberOfNodes();
 	IssmDouble*		meltingrate =	xNew<IssmDouble>(numnodes);
 	IssmDouble*		groundedice =	xNew<IssmDouble>(numnodes);
@@ -813,7 +813,7 @@
 	for(int i=0;i<femmodel->elements->Size();i++){
 		element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
-			Input* node_mask_input = element->GetInput(HydrologydcMaskThawedNodeEnum); _assert_(node_mask_input);
-
-		if(node_mask_input->Max()>0.){
+
+		Input2* input=element->GetInput2(HydrologydcMaskThawedNodeEnum); _assert_(input);
+		if(input->GetInputMax()>0.){
 			element_active = true;
 		}
@@ -821,5 +821,5 @@
 			element_active = false;
 		}
-		element->AddInput(new BoolInput(HydrologydcMaskThawedEltEnum,element_active));
+		element->SetBoolInput(element->inputs2,HydrologydcMaskThawedEltEnum,element_active);
 	}
 }/*}}}*/
@@ -849,6 +849,5 @@
 	/*Pass the activity mask from elements to nodes*/
 	basalelement->GetInputListOnVertices(&active[0],HydrologydcMaskThawedNodeEnum);
-	Input* 	active_element_input=basalelement->GetInput(HydrologydcMaskThawedEltEnum); _assert_(active_element_input);
-	active_element_input->GetInputValue(&active_element);
+	basalelement->GetInput2Value(&active_element,HydrologydcMaskThawedEltEnum);
 
 	for(int i=0;i<numnodes;i++) flag+=active[i];
Index: /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyDCInefficientAnalysis.h	(revision 24686)
@@ -9,5 +9,5 @@
 #include "./Analysis.h"
 class Node;
-class Input;
+class Input2;
 class HydrologyDCInefficientAnalysis: public Analysis{
 
@@ -16,5 +16,5 @@
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		void CreateConstraints(Constraints* constraints,IoModel* iomodel);
@@ -34,9 +34,9 @@
 		/*Intermediaries*/
 		void GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss);
-		IssmDouble SedimentStoring(Element* element, Gauss* gauss, Input* sed_head_input, Input* base_input);
-		IssmDouble SedimentTransmitivity(Element* element,Gauss* gauss,Input* sed_head_input, Input* base_input,Input* SedTrans_input);
+		IssmDouble SedimentStoring(Element* element, Gauss* gauss, Input2* sed_head_input, Input2* base_input);
+		IssmDouble SedimentTransmitivity(Element* element,Gauss* gauss,Input2* sed_head_input, Input2* base_input,Input2* SedTrans_input);
 		void GetHydrologyDCInefficientHmax(IssmDouble* ph_max,Element* element, Node* innode);
 		IssmDouble GetHydrologyKMatrixTransfer(Element* element);
-		IssmDouble GetHydrologyPVectorTransfer(Element* element, Gauss* gauss, Input* epl_head_input);
+		IssmDouble GetHydrologyPVectorTransfer(Element* element, Gauss* gauss, Input2* epl_head_input);
 		void ElementizeEplMask(FemModel* femmodel);
 		void HydrologyIDSGetMask(Vector<IssmDouble>* vec_mask, Element* element);
Index: /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.cpp	(revision 24686)
@@ -107,5 +107,5 @@
 	return 1;
 }/*}}}*/
-void HydrologyGlaDSAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void HydrologyGlaDSAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Fetch data needed: */
@@ -121,28 +121,28 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.bed",BedEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.bed",BedEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	}
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.bump_height",HydrologyBumpHeightEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.sheet_conductivity",HydrologySheetConductivityEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.neumannflux",HydrologyNeumannfluxEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.moulin_input",HydrologyMoulinInputEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.watercolumn",HydrologySheetThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.hydraulic_potential",HydraulicPotentialEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	}
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.bump_height",HydrologyBumpHeightEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.sheet_conductivity",HydrologySheetConductivityEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.neumannflux",HydrologyNeumannfluxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.moulin_input",HydrologyMoulinInputEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.watercolumn",HydrologySheetThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.hydraulic_potential",HydraulicPotentialEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
 	iomodel->FindConstant(&frictionlaw,"md.friction.law");
 
@@ -150,10 +150,10 @@
 	switch(frictionlaw){
 		case 1:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			break;
 		case 8:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
 			break;
 		default:
@@ -185,20 +185,27 @@
 	int frictionlaw;
 	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	if(frictionlaw==4 || frictionlaw==6){
+	if(frictionlaw==6){
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
 	}
-	if(frictionlaw==3 || frictionlaw==1 || frictionlaw==7){
+	if(frictionlaw==4){
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+	}
+	if(frictionlaw==1 || frictionlaw==3 || frictionlaw==7){
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
 	}
 	if(frictionlaw==9){
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
 		parameters->AddObject(new IntParam(FrictionCouplingEnum,0));
 	}
 
-  /*Requested outputs*/
-  iomodel->FindConstant(&requestedoutputs,&numoutputs,"md.hydrology.requested_outputs");
-  parameters->AddObject(new IntParam(HydrologyNumRequestedOutputsEnum,numoutputs));
-  if(numoutputs)parameters->AddObject(new StringArrayParam(HydrologyRequestedOutputsEnum,requestedoutputs,numoutputs));
-  iomodel->DeleteData(&requestedoutputs,numoutputs,"md.hydrology.requested_outputs");
+	/*Requested outputs*/
+	iomodel->FindConstant(&requestedoutputs,&numoutputs,"md.hydrology.requested_outputs");
+	parameters->AddObject(new IntParam(HydrologyNumRequestedOutputsEnum,numoutputs));
+	if(numoutputs)parameters->AddObject(new StringArrayParam(HydrologyRequestedOutputsEnum,requestedoutputs,numoutputs));
+	iomodel->DeleteData(&requestedoutputs,numoutputs,"md.hydrology.requested_outputs");
 }/*}}}*/
 
@@ -242,11 +249,11 @@
 	IssmDouble g         = element->FindParam(ConstantsGEnum);
 	IssmDouble e_v       = element->FindParam(HydrologyEnglacialVoidRatioEnum);
-	Input* k_input   = element->GetInput(HydrologySheetConductivityEnum);_assert_(k_input);
-	Input* phi_input = element->GetInput(HydraulicPotentialEnum);      _assert_(phi_input);
-	Input* h_input   = element->GetInput(HydrologySheetThicknessEnum); _assert_(h_input);
-	Input* H_input      = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
-	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input2* k_input   = element->GetInput2(HydrologySheetConductivityEnum);_assert_(k_input);
+	Input2* phi_input = element->GetInput2(HydraulicPotentialEnum);      _assert_(phi_input);
+	Input2* h_input   = element->GetInput2(HydrologySheetThicknessEnum); _assert_(h_input);
+	Input2* H_input      = element->GetInput2(ThicknessEnum); _assert_(H_input);
+	Input2* b_input      = element->GetInput2(BedEnum); _assert_(b_input);
+	Input2* B_input      = element->GetInput2(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input2* n_input      = element->GetInput2(MaterialsRheologyNEnum);         _assert_(n_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -342,16 +349,16 @@
 	IssmDouble g         = element->FindParam(ConstantsGEnum);
 	IssmDouble e_v       = element->FindParam(HydrologyEnglacialVoidRatioEnum);
-	Input* hr_input     = element->GetInput(HydrologyBumpHeightEnum);_assert_(hr_input);
-	Input* vx_input     = element->GetInput(VxEnum);_assert_(vx_input);
-	Input* vy_input     = element->GetInput(VyEnum);_assert_(vy_input);
-	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);_assert_(h_input);
-	Input* H_input      = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
-	Input* G_input      = element->GetInput(BasalforcingsGeothermalfluxEnum);_assert_(G_input);
-	Input* m_input      = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);_assert_(m_input);
-	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* phiold_input = element->GetInput(HydraulicPotentialOldEnum);      _assert_(phiold_input);
-	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	Input2* hr_input     = element->GetInput2(HydrologyBumpHeightEnum);_assert_(hr_input);
+	Input2* vx_input     = element->GetInput2(VxEnum);_assert_(vx_input);
+	Input2* vy_input     = element->GetInput2(VyEnum);_assert_(vy_input);
+	Input2* h_input      = element->GetInput2(HydrologySheetThicknessEnum);_assert_(h_input);
+	Input2* H_input      = element->GetInput2(ThicknessEnum); _assert_(H_input);
+	Input2* b_input      = element->GetInput2(BedEnum); _assert_(b_input);
+	Input2* G_input      = element->GetInput2(BasalforcingsGeothermalfluxEnum);_assert_(G_input);
+	Input2* m_input      = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum);_assert_(m_input);
+	Input2* B_input      = element->GetInput2(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input2* n_input      = element->GetInput2(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input2* phiold_input = element->GetInput2(HydraulicPotentialOldEnum);      _assert_(phiold_input);
+	Input2* phi_input    = element->GetInput2(HydraulicPotentialEnum);         _assert_(phi_input);
 
 	/*Build friction element, needed later: */
@@ -479,13 +486,13 @@
 	IssmDouble rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
 	IssmDouble g         = element->FindParam(ConstantsGEnum);
-	Input* hr_input = element->GetInput(HydrologyBumpHeightEnum);_assert_(hr_input);
-	Input* vx_input = element->GetInput(VxEnum);_assert_(vx_input);
-	Input* vy_input = element->GetInput(VyEnum);_assert_(vy_input);
-	Input* H_input  = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input  = element->GetInput(BedEnum); _assert_(b_input);
-	Input* hold_input  = element->GetInput(HydrologySheetThicknessOldEnum);_assert_(hold_input);
-	Input* B_input  = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input  = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* phi_input = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	Input2* hr_input = element->GetInput2(HydrologyBumpHeightEnum);_assert_(hr_input);
+	Input2* vx_input = element->GetInput2(VxEnum);_assert_(vx_input);
+	Input2* vy_input = element->GetInput2(VyEnum);_assert_(vy_input);
+	Input2* H_input  = element->GetInput2(ThicknessEnum); _assert_(H_input);
+	Input2* b_input  = element->GetInput2(BedEnum); _assert_(b_input);
+	Input2* hold_input  = element->GetInput2(HydrologySheetThicknessOldEnum);_assert_(hold_input);
+	Input2* B_input  = element->GetInput2(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input2* n_input  = element->GetInput2(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input2* phi_input = element->GetInput2(HydraulicPotentialEnum);         _assert_(phi_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -532,5 +539,5 @@
 	}
 
-	element->AddInput(HydrologySheetThicknessEnum,h_new,P1Enum);
+	element->AddInput2(HydrologySheetThicknessEnum,h_new,P1Enum);
 
 	/*Clean up and return*/
@@ -562,7 +569,7 @@
 	IssmDouble  rho_water = element->FindParam(MaterialsRhoFreshwaterEnum);
 	IssmDouble  g         = element->FindParam(ConstantsGEnum);
-	Input* H_input   = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input   = element->GetInput(BedEnum); _assert_(b_input);
-	Input* phi_input = element->GetInput(HydraulicPotentialEnum); _assert_(phi_input);
+	Input2* H_input   = element->GetInput2(ThicknessEnum); _assert_(H_input);
+	Input2* b_input   = element->GetInput2(BedEnum); _assert_(b_input);
+	Input2* phi_input = element->GetInput2(HydraulicPotentialEnum); _assert_(phi_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -592,5 +599,5 @@
 	}
 
-	element->AddInput(EffectivePressureEnum,N,element->FiniteElement());
+	element->AddInput2(EffectivePressureEnum,N,element->FiniteElement());
 
 	/*Clean up and return*/
Index: /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyGlaDSAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/HydrologyPismAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyPismAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyPismAnalysis.cpp	(revision 24686)
@@ -22,5 +22,5 @@
 	return 0;
 }/*}}}*/
-void HydrologyPismAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void HydrologyPismAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Fetch data needed: */
@@ -32,10 +32,10 @@
 
 	/*Add input to elements*/
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.drainage_rate",HydrologyDrainageRateEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.watercolumn_max",HydrologyWatercolumnMaxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.watercolumn",WatercolumnEnum,0.);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.drainage_rate",HydrologyDrainageRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.watercolumn_max",HydrologyWatercolumnMaxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.watercolumn",WatercolumnEnum,0.);
 }/*}}}*/
 void HydrologyPismAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -117,5 +117,5 @@
 	IssmDouble* meltingrate  = xNew<IssmDouble>(numvertices);
  	IssmDouble* watercolumn_max  = xNew<IssmDouble>(numvertices);
-	element->GetInputListOnVertices(&watercolumn[0],WatercolumnEnum);
+	element->GetInputListOnVertices(&watercolumn[0],WaterColumnOldEnum);
 	element->GetInputListOnVertices(&drainagerate[0],HydrologyDrainageRateEnum);
 	element->GetInputListOnVertices(&meltingrate[0],BasalforcingsGroundediceMeltingRateEnum);
@@ -131,5 +131,6 @@
 
 	/* Divide by connectivity, add degree of channelization as an input */
-	element->AddInput(WatercolumnEnum,&watercolumn[0],P1Enum);
+	/*FIXME: should be changed to P1, this is due to the NR, IsFloating will return 0 on this element, but it should not be DG*/
+	element->AddInput2(WatercolumnEnum,&watercolumn[0],P1DGEnum);
 
 	/*Clean up and return*/
Index: /issm/trunk/src/c/analyses/HydrologyPismAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyPismAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyPismAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.cpp	(revision 24686)
@@ -80,5 +80,5 @@
 	return 1;
 }/*}}}*/
-void HydrologyShaktiAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void HydrologyShaktiAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Fetch data needed: */
@@ -94,29 +94,29 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	}
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.head",HydrologyHeadEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.gap_height",HydrologyGapHeightEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.englacial_input",HydrologyEnglacialInputEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.moulin_input",HydrologyMoulinInputEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.bump_spacing",HydrologyBumpSpacingEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.bump_height",HydrologyBumpHeightEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.reynolds",HydrologyReynoldsEnum);
-	iomodel->FetchDataToInput(elements,"md.hydrology.neumannflux",HydrologyNeumannfluxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	}
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.head",HydrologyHeadEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.gap_height",HydrologyGapHeightEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.englacial_input",HydrologyEnglacialInputEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.moulin_input",HydrologyMoulinInputEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.bump_spacing",HydrologyBumpSpacingEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.bump_height",HydrologyBumpHeightEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.reynolds",HydrologyReynoldsEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.neumannflux",HydrologyNeumannfluxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
 	iomodel->FindConstant(&frictionlaw,"md.friction.law");
 
@@ -124,10 +124,10 @@
 	switch(frictionlaw){
 		case 1:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			break;
 		case 8:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
 			break;
 		default:
@@ -244,17 +244,17 @@
 	IssmDouble  rho_ice         = element->FindParam(MaterialsRhoIceEnum);
 	IssmDouble  rho_water       = element->FindParam(MaterialsRhoFreshwaterEnum);
-	Input* geothermalflux_input = element->GetInput(BasalforcingsGeothermalfluxEnum);_assert_(geothermalflux_input);
-	Input* head_input           = element->GetInput(HydrologyHeadEnum);              _assert_(head_input);
-	Input* gap_input            = element->GetInput(HydrologyGapHeightEnum);         _assert_(gap_input);
-	Input* thickness_input      = element->GetInput(ThicknessEnum);                  _assert_(thickness_input);
-	Input* base_input           = element->GetInput(BaseEnum);                       _assert_(base_input);
-	Input* B_input              = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input              = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* englacial_input      = element->GetInput(HydrologyEnglacialInputEnum);    _assert_(englacial_input);
-	Input* vx_input             = element->GetInput(VxEnum);                         _assert_(vx_input);
-	Input* vy_input             = element->GetInput(VyEnum);                         _assert_(vy_input);
-	Input* lr_input             = element->GetInput(HydrologyBumpSpacingEnum);       _assert_(lr_input);
-	Input* br_input             = element->GetInput(HydrologyBumpHeightEnum);        _assert_(br_input);
-   Input* headold_input        = element->GetInput(HydrologyHeadOldEnum);           _assert_(headold_input);
+	Input2* geothermalflux_input = element->GetInput2(BasalforcingsGeothermalfluxEnum);_assert_(geothermalflux_input);
+	Input2* head_input           = element->GetInput2(HydrologyHeadEnum);              _assert_(head_input);
+	Input2* gap_input            = element->GetInput2(HydrologyGapHeightEnum);         _assert_(gap_input);
+	Input2* thickness_input      = element->GetInput2(ThicknessEnum);                  _assert_(thickness_input);
+	Input2* base_input           = element->GetInput2(BaseEnum);                       _assert_(base_input);
+	Input2* B_input              = element->GetInput2(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input2* n_input              = element->GetInput2(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input2* englacial_input      = element->GetInput2(HydrologyEnglacialInputEnum);    _assert_(englacial_input);
+	Input2* vx_input             = element->GetInput2(VxEnum);                         _assert_(vx_input);
+	Input2* vy_input             = element->GetInput2(VyEnum);                         _assert_(vy_input);
+	Input2* lr_input             = element->GetInput2(HydrologyBumpSpacingEnum);       _assert_(lr_input);
+	Input2* br_input             = element->GetInput2(HydrologyBumpHeightEnum);        _assert_(br_input);
+   Input2* headold_input        = element->GetInput2(HydrologyHeadOldEnum);           _assert_(headold_input);
 
 	/*Get conductivity from inputs*/
@@ -403,15 +403,19 @@
 
 	/*Add input to the element: */
-	element->AddInput(HydrologyHeadEnum,values,element->GetElementType());
-   element->AddInput(EffectivePressureEnum,eff_pressure,P1Enum);
+	element->AddInput2(HydrologyHeadEnum,values,element->GetElementType());
+   element->AddInput2(EffectivePressureEnum,eff_pressure,P1Enum);
 
 	/*Update reynolds number according to new solution*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* head_input = element->GetInput(HydrologyHeadEnum);_assert_(head_input);
-	head_input->GetInputDerivativeAverageValue(&dh[0],xyz_list);
+	Input2* head_input = element->GetInput2(HydrologyHeadEnum);_assert_(head_input);
 	IssmDouble conductivity = GetConductivity(element);
 
+	/*Get gap height derivatives at the center of the element*/
+	Gauss* gauss=element->NewGauss(1);
+	head_input->GetInputDerivativeValue(&dh[0],xyz_list,gauss);
+	delete gauss;
+
 	IssmDouble reynolds = conductivity*sqrt(dh[0]*dh[0]+dh[1]*dh[1])/NU;
-	element->AddInput(HydrologyReynoldsEnum,&reynolds,P0Enum);
+	element->AddInput2(HydrologyReynoldsEnum,&reynolds,P0Enum);
 
 	/*Free resources:*/
@@ -439,6 +443,6 @@
 
 	/*Get Reynolds and gap average values*/
-	Input* reynolds_input = element->GetInput(HydrologyReynoldsEnum);  _assert_(reynolds_input);
-	Input* gap_input      = element->GetInput(HydrologyGapHeightEnum); _assert_(gap_input);
+	Input2* reynolds_input = element->GetInput2(HydrologyReynoldsEnum);  _assert_(reynolds_input);
+	Input2* gap_input      = element->GetInput2(HydrologyGapHeightEnum); _assert_(gap_input);
 	reynolds_input->GetInputAverage(&reynolds);
 	gap_input->GetInputAverage(&gap);
@@ -482,16 +486,16 @@
 	IssmDouble  rho_ice         = element->FindParam(MaterialsRhoIceEnum);
 	IssmDouble  rho_water       = element->FindParam(MaterialsRhoFreshwaterEnum);
-	Input* geothermalflux_input = element->GetInput(BasalforcingsGeothermalfluxEnum);_assert_(geothermalflux_input);
-	Input* head_input           = element->GetInput(HydrologyHeadEnum);              _assert_(head_input);
-	Input* gap_input            = element->GetInput(HydrologyGapHeightEnum);         _assert_(gap_input);
-	Input* thickness_input      = element->GetInput(ThicknessEnum);                  _assert_(thickness_input);
-	Input* base_input           = element->GetInput(BaseEnum);                       _assert_(base_input);
-	Input* B_input              = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input              = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* englacial_input      = element->GetInput(HydrologyEnglacialInputEnum);    _assert_(englacial_input);
-	Input* vx_input             = element->GetInput(VxEnum);                         _assert_(vx_input);
-	Input* vy_input             = element->GetInput(VyEnum);                         _assert_(vy_input);
-	Input* lr_input             = element->GetInput(HydrologyBumpSpacingEnum);       _assert_(lr_input);
-	Input* br_input             = element->GetInput(HydrologyBumpHeightEnum);        _assert_(br_input);
+	Input2* geothermalflux_input = element->GetInput2(BasalforcingsGeothermalfluxEnum);_assert_(geothermalflux_input);
+	Input2* head_input           = element->GetInput2(HydrologyHeadEnum);              _assert_(head_input);
+	Input2* gap_input            = element->GetInput2(HydrologyGapHeightEnum);         _assert_(gap_input);
+	Input2* thickness_input      = element->GetInput2(ThicknessEnum);                  _assert_(thickness_input);
+	Input2* base_input           = element->GetInput2(BaseEnum);                       _assert_(base_input);
+	Input2* B_input              = element->GetInput2(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input2* n_input              = element->GetInput2(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input2* englacial_input      = element->GetInput2(HydrologyEnglacialInputEnum);    _assert_(englacial_input);
+	Input2* vx_input             = element->GetInput2(VxEnum);                         _assert_(vx_input);
+	Input2* vy_input             = element->GetInput2(VyEnum);                         _assert_(vy_input);
+	Input2* lr_input             = element->GetInput2(HydrologyBumpSpacingEnum);       _assert_(lr_input);
+	Input2* br_input             = element->GetInput2(HydrologyBumpHeightEnum);        _assert_(br_input);
 
 	/*Get conductivity from inputs*/
@@ -578,13 +582,13 @@
 
 	/*Add new gap as an input*/
-	element->AddInput(HydrologyGapHeightEnum,&newgap,P0Enum);
+	element->AddInput2(HydrologyGapHeightEnum,&newgap,P0Enum);
 
 	/*Divide by connectivity, add basal flux as an input*/
 	q = q/totalweights;
-	element->AddInput(HydrologyBasalFluxEnum,&q,P0Enum);
+	element->AddInput2(HydrologyBasalFluxEnum,&q,P0Enum);
 
 	/* Divide by connectivity, add degree of channelization as an input */
 	channelization = channelization/totalweights;
-	element->AddInput(DegreeOfChannelizationEnum,&channelization,P0Enum);
+	element->AddInput2(DegreeOfChannelizationEnum,&channelization,P0Enum);
 
 	/*Clean up and return*/
Index: /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyShaktiAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.cpp	(revision 24686)
@@ -36,5 +36,5 @@
 	return 1;
 }/*}}}*/
-void HydrologyShreveAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void HydrologyShreveAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Fetch data needed: */
@@ -50,23 +50,23 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	}
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.watercolumn",WatercolumnEnum);
-
-	elements->InputDuplicate(WatercolumnEnum,WaterColumnOldEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	}
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.watercolumn",WatercolumnEnum);
+
+	inputs2->DuplicateInput(WatercolumnEnum,WaterColumnOldEnum);
 }/*}}}*/
 void HydrologyShreveAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -109,9 +109,9 @@
 	IssmDouble  g         = element->FindParam(ConstantsGEnum);
 	IssmDouble  mu_water  = element->FindParam(MaterialsMuWaterEnum);
-	Input* surfaceslopex_input = element->GetInput(SurfaceSlopeXEnum); _assert_(surfaceslopex_input);
-	Input* surfaceslopey_input = element->GetInput(SurfaceSlopeYEnum); _assert_(surfaceslopey_input);
-	Input* bedslopex_input     = element->GetInput(BedSlopeXEnum);     _assert_(bedslopex_input);
-	Input* bedslopey_input     = element->GetInput(BedSlopeYEnum);     _assert_(bedslopey_input);
-	Input* watercolumn_input   = element->GetInput(WatercolumnEnum);   _assert_(watercolumn_input);
+	Input2* surfaceslopex_input = element->GetInput2(SurfaceSlopeXEnum); _assert_(surfaceslopex_input);
+	Input2* surfaceslopey_input = element->GetInput2(SurfaceSlopeYEnum); _assert_(surfaceslopey_input);
+	Input2* bedslopex_input     = element->GetInput2(BedSlopeXEnum);     _assert_(bedslopex_input);
+	Input2* bedslopey_input     = element->GetInput2(BedSlopeYEnum);     _assert_(bedslopey_input);
+	Input2* watercolumn_input   = element->GetInput2(WatercolumnEnum);   _assert_(watercolumn_input);
 
 	/*Fetch number of vertices and allocate output*/
@@ -138,6 +138,6 @@
 
 	/*Add to inputs*/
-	element->AddInput(HydrologyWaterVxEnum,vx,P1Enum);
-	element->AddInput(HydrologyWaterVyEnum,vy,P1Enum);
+	element->AddInput2(HydrologyWaterVxEnum,vx,P1Enum);
+	element->AddInput2(HydrologyWaterVyEnum,vy,P1Enum);
 	xDelete<IssmDouble>(vx);
 	xDelete<IssmDouble>(vy);
@@ -172,6 +172,6 @@
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
 	element->FindParam(&diffusivity,HydrologyshreveStabilizationEnum);
-	Input* vx_input=element->GetInput(HydrologyWaterVxEnum); _assert_(vx_input);
-	Input* vy_input=element->GetInput(HydrologyWaterVyEnum); _assert_(vy_input);
+	Input2* vx_input=element->GetInput2(HydrologyWaterVxEnum); _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(HydrologyWaterVyEnum); _assert_(vy_input);
 	h = element->CharacteristicLength();
 
@@ -257,6 +257,6 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	Input* mb_input   = element->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(mb_input);
-	Input* oldw_input = element->GetInput(WaterColumnOldEnum);                      _assert_(oldw_input);
+	Input2* mb_input   = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(mb_input);
+	Input2* oldw_input = element->GetInput2(WaterColumnOldEnum);                      _assert_(oldw_input);
 
 	/*Initialize mb_correction to 0, do not forget!:*/
@@ -368,5 +368,5 @@
 
 	/*Add input to the element: */
-	element->AddInput(WatercolumnEnum,values,element->GetElementType());
+	element->AddInput2(WatercolumnEnum,values,element->GetElementType());
 
 	/*Free ressources:*/
Index: /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/HydrologyShreveAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/L2ProjectionBaseAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/L2ProjectionBaseAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/L2ProjectionBaseAnalysis.cpp	(revision 24686)
@@ -28,5 +28,5 @@
 	return 1;
 }/*}}}*/
-void L2ProjectionBaseAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void L2ProjectionBaseAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Update elements: */
@@ -35,16 +35,16 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-   iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+   iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum & iomodel->domaintype!=Domain3DsurfaceEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 }/*}}}*/
@@ -148,6 +148,6 @@
 	int         input_enum;
 	IssmDouble  Jdet,value,slopes[2];
-	Input      *input     = NULL;
-	Input      *input2    = NULL;
+	Input2     *input     = NULL;
+	Input2     *input2    = NULL;
 	IssmDouble *xyz_list  = NULL;
 
@@ -163,13 +163,13 @@
 	basalelement->FindParam(&input_enum,InputToL2ProjectEnum);
 	switch(input_enum){
-		case SurfaceSlopeXEnum: input2 = basalelement->GetInput(SurfaceEnum); _assert_(input2); break;
-		case SurfaceSlopeYEnum: input2 = basalelement->GetInput(SurfaceEnum); _assert_(input2); break;
-		case BedSlopeXEnum:     input2 = basalelement->GetInput(BaseEnum);     _assert_(input2); break;
-		case BedSlopeYEnum:     input2 = basalelement->GetInput(BaseEnum);     _assert_(input2); break;
-		case BaseSlopeXEnum:    input2 = basalelement->GetInput(BaseEnum);    _assert_(input2); break;
-		case BaseSlopeYEnum:    input2 = basalelement->GetInput(BaseEnum);    _assert_(input2); break;
-		case LevelsetfunctionSlopeXEnum: input2 = basalelement->GetInput(MaskIceLevelsetEnum);     _assert_(input2); break;
-		case LevelsetfunctionSlopeYEnum: input2 = basalelement->GetInput(MaskIceLevelsetEnum);     _assert_(input2); break;
-		default: input = element->GetInput(input_enum);
+		case SurfaceSlopeXEnum: input2 = basalelement->GetInput2(SurfaceEnum); _assert_(input2); break;
+		case SurfaceSlopeYEnum: input2 = basalelement->GetInput2(SurfaceEnum); _assert_(input2); break;
+		case BedSlopeXEnum:     input2 = basalelement->GetInput2(BaseEnum);     _assert_(input2); break;
+		case BedSlopeYEnum:     input2 = basalelement->GetInput2(BaseEnum);     _assert_(input2); break;
+		case BaseSlopeXEnum:    input2 = basalelement->GetInput2(BaseEnum);    _assert_(input2); break;
+		case BaseSlopeYEnum:    input2 = basalelement->GetInput2(BaseEnum);    _assert_(input2); break;
+		case LevelsetfunctionSlopeXEnum: input2 = basalelement->GetInput2(MaskIceLevelsetEnum);     _assert_(input2); break;
+		case LevelsetfunctionSlopeYEnum: input2 = basalelement->GetInput2(MaskIceLevelsetEnum);     _assert_(input2); break;
+		default: input = element->GetInput2(input_enum);
 	}
 
Index: /issm/trunk/src/c/analyses/L2ProjectionBaseAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/L2ProjectionBaseAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/L2ProjectionBaseAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/L2ProjectionEPLAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/L2ProjectionEPLAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/L2ProjectionEPLAnalysis.cpp	(revision 24686)
@@ -37,5 +37,5 @@
 	return 1;
 }/*}}}*/
-void L2ProjectionEPLAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void L2ProjectionEPLAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	bool   isefficientlayer;
@@ -55,14 +55,14 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.initialization.epl_head",EplHeadSubstepEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.epl_head",EplHeadSubstepEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 }/*}}}*/
@@ -105,6 +105,5 @@
 	}
 
-	Input* active_element_input=basalelement->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
-	active_element_input->GetInputValue(&active_element);
+	basalelement->GetInput2Value(&active_element,HydrologydcMaskEplactiveEltEnum);
 
 	/* Check that all nodes are active, else return empty matrix */
@@ -173,6 +172,5 @@
 	}
 
-	Input* active_element_input = basalelement->GetInput(HydrologydcMaskEplactiveEltEnum); _assert_(active_element_input);
-	active_element_input->GetInputValue(&active_element);
+	basalelement->GetInput2Value(&active_element,HydrologydcMaskEplactiveEltEnum);
 
 	/*Check that all nodes are active, else return empty matrix*/
@@ -188,5 +186,5 @@
 	int         input_enum,index;
 	IssmDouble  Jdet,slopes[2];
-	Input      *input     = NULL;
+	Input2     *input     = NULL;
 	IssmDouble *xyz_list  = NULL;
 
@@ -202,6 +200,6 @@
 	basalelement->FindParam(&input_enum,InputToL2ProjectEnum);
 	switch(input_enum){
-		case EplHeadSlopeXEnum: input = basalelement->GetInput(EplHeadSubstepEnum); index = 0; _assert_(input); break;
-		case EplHeadSlopeYEnum: input = basalelement->GetInput(EplHeadSubstepEnum); index = 1; _assert_(input); break;
+		case EplHeadSlopeXEnum: input = basalelement->GetInput2(EplHeadSubstepEnum); index = 0; _assert_(input); break;
+		case EplHeadSlopeYEnum: input = basalelement->GetInput2(EplHeadSubstepEnum); index = 1; _assert_(input); break;
 		default: _error_("not implemented");
 	}
Index: /issm/trunk/src/c/analyses/L2ProjectionEPLAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/L2ProjectionEPLAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/L2ProjectionEPLAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/LevelsetAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/LevelsetAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/LevelsetAnalysis.cpp	(revision 24686)
@@ -1,4 +1,4 @@
 #ifdef HAVE_CONFIG_H
-   #include <config.h>
+#include <config.h>
 #else
 #error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
@@ -32,5 +32,5 @@
 }
 /*}}}*/
-void LevelsetAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void LevelsetAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Finite element type*/
@@ -43,12 +43,12 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
 
 	/*Get moving front parameters*/
@@ -57,26 +57,26 @@
 	switch(calvinglaw){
 		case DefaultCalvingEnum:
-			iomodel->FetchDataToInput(elements,"md.calving.calvingrate",CalvingCalvingrateEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.calving.calvingrate",CalvingCalvingrateEnum);
 			break;
 		case CalvingLevermannEnum:
-			iomodel->FetchDataToInput(elements,"md.calving.coeff",CalvinglevermannCoeffEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.calving.coeff",CalvinglevermannCoeffEnum);
 			break;
 		case CalvingVonmisesEnum:
-			iomodel->FetchDataToInput(elements,"md.calving.stress_threshold_groundedice",CalvingStressThresholdGroundediceEnum);
-			iomodel->FetchDataToInput(elements,"md.calving.stress_threshold_floatingice",CalvingStressThresholdFloatingiceEnum);
-			iomodel->FetchDataToInput(elements,"md.geometry.bed",BedEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.calving.stress_threshold_groundedice",CalvingStressThresholdGroundediceEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.calving.stress_threshold_floatingice",CalvingStressThresholdFloatingiceEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.geometry.bed",BedEnum);
 			break;
 		case CalvingMinthicknessEnum:
-			iomodel->FetchDataToInput(elements,"md.geometry.bed",BedEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.geometry.bed",BedEnum);
 			break;
 		case CalvingHabEnum:
-			iomodel->FetchDataToInput(elements,"md.calving.flotation_fraction",CalvingHabFractionEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.calving.flotation_fraction",CalvingHabFractionEnum);
 			break;
 		case CalvingCrevasseDepthEnum:
-			iomodel->FetchDataToInput(elements,"md.calving.water_height",WaterheightEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.calving.water_height",WaterheightEnum);
 			break;
 		case CalvingDev2Enum:
-			iomodel->FetchDataToInput(elements,"md.calving.stress_threshold_groundedice",CalvingStressThresholdGroundediceEnum);
-			iomodel->FetchDataToInput(elements,"md.calving.stress_threshold_floatingice",CalvingStressThresholdFloatingiceEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.calving.stress_threshold_groundedice",CalvingStressThresholdGroundediceEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.calving.stress_threshold_floatingice",CalvingStressThresholdFloatingiceEnum);
 			break;
 		default:
@@ -89,10 +89,10 @@
 	switch(melt_parameterization){
 		case FrontalForcingsDefaultEnum:
-			iomodel->FetchDataToInput(elements,"md.frontalforcings.meltingrate",CalvingMeltingrateEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.frontalforcings.meltingrate",CalvingMeltingrateEnum);
 			break;
 		case FrontalForcingsRignotEnum:
-			iomodel->FetchDataToInput(elements,"md.frontalforcings.basin",FrontalForcingsBasinIdEnum);
-			iomodel->FetchDataToInput(elements,"md.frontalforcings.subglacial_discharge",FrontalForcingsSubglacialDischargeEnum);
-			iomodel->FetchDataToInput(elements,"md.frontalforcings.thermalforcing",FrontalForcingsThermalForcingEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.frontalforcings.basin",FrontalForcingsBasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.frontalforcings.subglacial_discharge",FrontalForcingsSubglacialDischargeEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.frontalforcings.thermalforcing",FrontalForcingsThermalForcingEnum);
 			break;
 		default:
@@ -226,28 +226,28 @@
 	basalelement->FindParam(&dt,TimesteppingTimeStepEnum);
 	basalelement->FindParam(&calvingmax,CalvingMaxEnum);
-	Input* vx_input           = NULL;
-	Input* vy_input           = NULL;
-	Input* calvingratex_input = NULL;
-	Input* calvingratey_input = NULL;
-	Input* lsf_slopex_input   = NULL;
-	Input* lsf_slopey_input   = NULL;
-	Input* calvingrate_input  = NULL;
-	Input* meltingrate_input  = NULL;
-	Input* gr_input           = NULL;
+	Input2* vx_input           = NULL;
+	Input2* vy_input           = NULL;
+	Input2* calvingratex_input = NULL;
+	Input2* calvingratey_input = NULL;
+	Input2* lsf_slopex_input   = NULL;
+	Input2* lsf_slopey_input   = NULL;
+	Input2* calvingrate_input  = NULL;
+	Input2* meltingrate_input  = NULL;
+	Input2* gr_input           = NULL;
 
 	/*Load velocities*/
 	switch(domaintype){
 		case Domain2DverticalEnum:
-			vx_input=basalelement->GetInput(VxEnum); _assert_(vx_input);
+			vx_input=basalelement->GetInput2(VxEnum); _assert_(vx_input);
 			break;
 		case Domain2DhorizontalEnum:
-			vx_input=basalelement->GetInput(VxEnum); _assert_(vx_input);
-			vy_input=basalelement->GetInput(VyEnum); _assert_(vy_input);
-			gr_input=basalelement->GetInput(MaskGroundediceLevelsetEnum); _assert_(gr_input);
+			vx_input=basalelement->GetInput2(VxEnum); _assert_(vx_input);
+			vy_input=basalelement->GetInput2(VyEnum); _assert_(vy_input);
+			gr_input=basalelement->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gr_input);
 			break;
 		case Domain3DEnum:
-			vx_input=basalelement->GetInput(VxAverageEnum); _assert_(vx_input);
-			vy_input=basalelement->GetInput(VyAverageEnum); _assert_(vy_input);
-			gr_input=basalelement->GetInput(MaskGroundediceLevelsetEnum); _assert_(gr_input);
+			vx_input=basalelement->GetInput2(VxAverageEnum); _assert_(vx_input);
+			vy_input=basalelement->GetInput2(VyAverageEnum); _assert_(vy_input);
+			gr_input=basalelement->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gr_input);
 			break;
 		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
@@ -258,47 +258,47 @@
 		case DefaultCalvingEnum:
 		case CalvingVonmisesEnum:
-			lsf_slopex_input  = basalelement->GetInput(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
-			if(dim==2) lsf_slopey_input  = basalelement->GetInput(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
-			calvingrate_input = basalelement->GetInput(CalvingCalvingrateEnum);     _assert_(calvingrate_input);
-			meltingrate_input = basalelement->GetInput(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
+			lsf_slopex_input  = basalelement->GetInput2(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
+			if(dim==2) lsf_slopey_input  = basalelement->GetInput2(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
+			calvingrate_input = basalelement->GetInput2(CalvingCalvingrateEnum);     _assert_(calvingrate_input);
+			meltingrate_input = basalelement->GetInput2(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
 			break;
 		case CalvingLevermannEnum:
 			switch(domaintype){
 				case Domain2DverticalEnum:
-					calvingratex_input=basalelement->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
+					calvingratex_input=basalelement->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
 					break;
 				case Domain2DhorizontalEnum:
-					calvingratex_input=basalelement->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-					calvingratey_input=basalelement->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
+					calvingratex_input=basalelement->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+					calvingratey_input=basalelement->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
 					break;
 				case Domain3DEnum:
-					calvingratex_input=basalelement->GetInput(CalvingratexAverageEnum); _assert_(calvingratex_input);
-					calvingratey_input=basalelement->GetInput(CalvingrateyAverageEnum); _assert_(calvingratey_input);
+					calvingratex_input=basalelement->GetInput2(CalvingratexAverageEnum); _assert_(calvingratex_input);
+					calvingratey_input=basalelement->GetInput2(CalvingrateyAverageEnum); _assert_(calvingratey_input);
 					break;
 				default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
 			}
-			meltingrate_input = basalelement->GetInput(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
+			meltingrate_input = basalelement->GetInput2(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
 			break;
 		case CalvingMinthicknessEnum:
-			lsf_slopex_input  = basalelement->GetInput(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
-			if(dim==2) lsf_slopey_input  = basalelement->GetInput(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
-			meltingrate_input = basalelement->GetInput(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
+			lsf_slopex_input  = basalelement->GetInput2(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
+			if(dim==2) lsf_slopey_input  = basalelement->GetInput2(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
+			meltingrate_input = basalelement->GetInput2(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
 			break;
 		case CalvingHabEnum:
-			lsf_slopex_input  = basalelement->GetInput(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
-			if(dim==2) lsf_slopey_input  = basalelement->GetInput(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
-			meltingrate_input = basalelement->GetInput(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
+			lsf_slopex_input  = basalelement->GetInput2(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
+			if(dim==2) lsf_slopey_input  = basalelement->GetInput2(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
+			meltingrate_input = basalelement->GetInput2(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
 			break;
 		case CalvingCrevasseDepthEnum:
-			lsf_slopex_input  = basalelement->GetInput(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
-			if(dim==2) lsf_slopey_input  = basalelement->GetInput(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
-			meltingrate_input = basalelement->GetInput(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
+			lsf_slopex_input  = basalelement->GetInput2(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
+			if(dim==2) lsf_slopey_input  = basalelement->GetInput2(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
+			meltingrate_input = basalelement->GetInput2(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
 			break;
 		case CalvingDev2Enum:
 			basalelement->FindParam(&calvinghaf,CalvingHeightAboveFloatationEnum);
-			lsf_slopex_input  = basalelement->GetInput(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
-			if(dim==2) lsf_slopey_input  = basalelement->GetInput(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
-			calvingrate_input = basalelement->GetInput(CalvingCalvingrateEnum);     _assert_(calvingrate_input);
-			meltingrate_input = basalelement->GetInput(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
+			lsf_slopex_input  = basalelement->GetInput2(LevelsetfunctionSlopeXEnum); _assert_(lsf_slopex_input);
+			if(dim==2) lsf_slopey_input  = basalelement->GetInput2(LevelsetfunctionSlopeYEnum); _assert_(lsf_slopey_input);
+			calvingrate_input = basalelement->GetInput2(CalvingCalvingrateEnum);     _assert_(calvingrate_input);
+			meltingrate_input = basalelement->GetInput2(CalvingMeltingrateEnum);     _assert_(meltingrate_input);
 			break;
 		default:
@@ -435,47 +435,47 @@
 
 			case CalvingDev2Enum:
-				{
-				lsf_slopex_input->GetInputValue(&dlsf[0],gauss);
-				if(dim==2) lsf_slopey_input->GetInputValue(&dlsf[1],gauss);
-				calvingrate_input->GetInputValue(&calvingrate,gauss);
-				meltingrate_input->GetInputValue(&meltingrate,gauss);
-				gr_input->GetInputValue(&groundedice,gauss);
-
-				//idea: no retreat on ice above critical calving height "calvinghaf" . Limit using regularized Heaviside function.
-				vel=sqrt(v[0]*v[0] + v[1]*v[1]);
-				haf_eps=10.;
-				if(groundedice-calvinghaf<=-haf_eps){
-					// ice floats freely below calvinghaf: calve freely
-					// undercutting has no effect:
-					meltingrate=0.;
-				}
-				else if(groundedice-calvinghaf>=haf_eps){
-					// ice is well above calvinghaf -> no calving back, i.e. limit calving rate to ice velocity
-					calvingrate=min(calvingrate,vel);
-					// ice is almost grounded: frontal undercutting has maximum effect (do nothing).
-				}
-				else{ // ice is close to calvinghaf: smooth transition between limitation and free calving.
-					//heaviside: 0 for floating, 1 for grounded
-					heaviside=(groundedice-calvinghaf+haf_eps)/(2.*haf_eps) + sin(PI*(groundedice-calvinghaf)/haf_eps)/(2.*PI);
-					calvingrate=heaviside*(min(calvingrate,vel)-calvingrate)+calvingrate;
-					meltingrate=heaviside*meltingrate+0.;
-				}
-
-				norm_dlsf=0.;
-				for(i=0;i<dim;i++) norm_dlsf+=pow(dlsf[i],2);
-				norm_dlsf=sqrt(norm_dlsf);
-
-				if(norm_dlsf>1.e-10)
-				 for(i=0;i<dim;i++){
-					 c[i]=calvingrate*dlsf[i]/norm_dlsf;
-					 m[i]=meltingrate*dlsf[i]/norm_dlsf;
-				 }
-				else
-				 for(i=0;i<dim;i++){
-					 c[i]=0.;
-					 m[i]=0.;
-				 }
-				break;
-				}
+				  {
+					lsf_slopex_input->GetInputValue(&dlsf[0],gauss);
+					if(dim==2) lsf_slopey_input->GetInputValue(&dlsf[1],gauss);
+					calvingrate_input->GetInputValue(&calvingrate,gauss);
+					meltingrate_input->GetInputValue(&meltingrate,gauss);
+					gr_input->GetInputValue(&groundedice,gauss);
+
+					//idea: no retreat on ice above critical calving height "calvinghaf" . Limit using regularized Heaviside function.
+					vel=sqrt(v[0]*v[0] + v[1]*v[1]);
+					haf_eps=10.;
+					if(groundedice-calvinghaf<=-haf_eps){
+						// ice floats freely below calvinghaf: calve freely
+						// undercutting has no effect:
+						meltingrate=0.;
+					}
+					else if(groundedice-calvinghaf>=haf_eps){
+						// ice is well above calvinghaf -> no calving back, i.e. limit calving rate to ice velocity
+						calvingrate=min(calvingrate,vel);
+						// ice is almost grounded: frontal undercutting has maximum effect (do nothing).
+					}
+					else{ // ice is close to calvinghaf: smooth transition between limitation and free calving.
+						//heaviside: 0 for floating, 1 for grounded
+						heaviside=(groundedice-calvinghaf+haf_eps)/(2.*haf_eps) + sin(PI*(groundedice-calvinghaf)/haf_eps)/(2.*PI);
+						calvingrate=heaviside*(min(calvingrate,vel)-calvingrate)+calvingrate;
+						meltingrate=heaviside*meltingrate+0.;
+					}
+
+					norm_dlsf=0.;
+					for(i=0;i<dim;i++) norm_dlsf+=pow(dlsf[i],2);
+					norm_dlsf=sqrt(norm_dlsf);
+
+					if(norm_dlsf>1.e-10)
+					 for(i=0;i<dim;i++){
+						 c[i]=calvingrate*dlsf[i]/norm_dlsf;
+						 m[i]=meltingrate*dlsf[i]/norm_dlsf;
+					 }
+					else
+					 for(i=0;i<dim;i++){
+						 c[i]=0.;
+						 m[i]=0.;
+					 }
+					break;
+				  }
 
 			default:
@@ -518,17 +518,15 @@
 			case 2:
 				  {
-				/* Streamline Upwinding */
-				basalelement->ElementSizes(&hx,&hy,&hz);
-				h=sqrt( pow(hx*w[0]/vel,2) + pow(hy*w[1]/vel,2) );
-				IssmDouble D[9];
-				for(row=0;row<dim;row++)
-					for(col=0;col<dim;col++)
-						D[row*dim+col] = D_scalar*h/(2.*vel)*w[row]*w[col];
-				GetBprime(Bprime,basalelement,xyz_list,gauss);
-
-				TripleMultiply(Bprime,dim,numnodes,1,
-							&D[0],dim,dim,0,
-							Bprime,dim,numnodes,0,
-							&Ke->values[0],1);
+					/* Streamline Upwinding */
+					basalelement->ElementSizes(&hx,&hy,&hz);
+					h=sqrt( pow(hx*w[0]/vel,2) + pow(hy*w[1]/vel,2) );
+					for(int i=0;i<numnodes;i++){
+						for(int j=0;j<numnodes;j++){
+							Ke->values[i*numnodes+j] += D_scalar*h/(2.*vel)*(
+										dbasis[0*numnodes+i] *(w[0]*w[0]*dbasis[0*numnodes+j] + w[0]*w[1]*dbasis[1*numnodes+j]) +
+										dbasis[1*numnodes+i] *(w[1]*w[0]*dbasis[0*numnodes+j] + w[1]*w[1]*dbasis[1*numnodes+j]) 
+										);
+						}
+					}
 				  }
 				break;
@@ -571,5 +569,5 @@
 		/*Retrieve all inputs and parameters*/
 		basalelement->GetVerticesCoordinates(&xyz_list);
-		Input* levelset_input     = basalelement->GetInput(MaskIceLevelsetEnum);                    _assert_(levelset_input);
+		Input2* levelset_input     = basalelement->GetInput2(MaskIceLevelsetEnum);                    _assert_(levelset_input);
 
 		/* Start  looping on the number of gaussian points: */
@@ -596,59 +594,4 @@
 	return pe;
 }/*}}}*/
-void           LevelsetAnalysis::GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss){/*{{{*/
-	/*Compute B  matrix. B=[B1 B2 B3] where Bi is of size 3*NDOF2.
-	 * For node i, Bi can be expressed in the actual coordinate system
-	 * by:
-	 *       Bi=[ N ]
-	 *          [ N ]
-	 * where N is the finiteelement function for node i.
-	 *
-	 * We assume B_prog has been allocated already, of size: 2x(NDOF1*numnodes)
-	 */
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Get nodal functions*/
-	IssmDouble* basis=xNew<IssmDouble>(numnodes);
-	element->NodalFunctions(basis,gauss);
-
-	/*Build B: */
-	for(int i=0;i<numnodes;i++){
-		B[numnodes*0+i] = basis[i];
-		B[numnodes*1+i] = basis[i];
-	}
-
-	/*Clean-up*/
-	xDelete<IssmDouble>(basis);
-}/*}}}*/
-void           LevelsetAnalysis::GetBprime(IssmDouble* Bprime,Element* element,IssmDouble* xyz_list,Gauss* gauss){/*{{{*/
-	/*Compute B'  matrix. B'=[B1' B2' B3'] where Bi' is of size 3*NDOF2.
-	 * For node i, Bi' can be expressed in the actual coordinate system
-	 * by:
-	 *       Bi_prime=[ dN/dx ]
-	 *                [ dN/dy ]
-	 * where N is the finiteelement function for node i.
-	 *
-	 * We assume B' has been allocated already, of size: 3x(NDOF2*numnodes)
-	 */
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Get nodal functions derivatives*/
-	IssmDouble* dbasis=xNew<IssmDouble>(2*numnodes);
-	element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
-
-	/*Build B': */
-	for(int i=0;i<numnodes;i++){
-		Bprime[numnodes*0+i] = dbasis[0*numnodes+i];
-		Bprime[numnodes*1+i] = dbasis[1*numnodes+i];
-	}
-
-	/*Clean-up*/
-	xDelete<IssmDouble>(dbasis);
-
-}/*}}}*/
 IssmDouble     LevelsetAnalysis::GetDistanceToStraight(IssmDouble* q, IssmDouble* s0, IssmDouble* s1){/*{{{*/
 	// returns distance d of point q to straight going through points s0, s1
@@ -669,5 +612,5 @@
 	norm_b=0.;
 	for(i=0;i<dim;i++)
-		norm_b+=b[i]*b[i];
+	 norm_b+=b[i]*b[i];
 	norm_b=sqrt(norm_b);
 	_assert_(norm_b>0.);
@@ -718,7 +661,7 @@
 			int      numnodes = element->GetNumberOfNodes();
 			Gauss*   gauss    = element->NewGauss();
-			Input*   H_input  = element->GetInput(ThicknessEnum); _assert_(H_input);
-			Input*   b_input = element->GetInput(BedEnum); _assert_(b_input);
-			Input*   sl_input = element->GetInput(SealevelEnum); _assert_(sl_input);
+			Input2*   H_input  = element->GetInput2(ThicknessEnum); _assert_(H_input);
+			Input2*   b_input = element->GetInput2(BedEnum); _assert_(b_input);
+			Input2*   sl_input = element->GetInput2(SealevelEnum); _assert_(sl_input);
 
 			/*Potentially constrain nodes of this element*/
@@ -746,5 +689,5 @@
 
 		/*Get the fraction of the flotation thickness at the terminus*/
-		femmodel->elements->InputDuplicate(MaskIceLevelsetEnum, DistanceToCalvingfrontEnum);
+		InputDuplicatex(femmodel,MaskIceLevelsetEnum,DistanceToCalvingfrontEnum);
 		femmodel->DistanceToFieldValue(MaskIceLevelsetEnum,0,DistanceToCalvingfrontEnum);
 
@@ -758,8 +701,8 @@
 			int      numnodes           = element->GetNumberOfNodes();
 			Gauss*   gauss              = element->NewGauss();
-			Input*   H_input            = element->GetInput(ThicknessEnum); _assert_(H_input);
-			Input*   bed_input          = element->GetInput(BedEnum); _assert_(bed_input);
-			Input*   hab_fraction_input = element->GetInput(CalvingHabFractionEnum); _assert_(hab_fraction_input);
-			Input*   ls_input           = element->GetInput(DistanceToCalvingfrontEnum); _assert_(ls_input);
+			Input2*   H_input            = element->GetInput2(ThicknessEnum); _assert_(H_input);
+			Input2*   bed_input          = element->GetInput2(BedEnum); _assert_(bed_input);
+			Input2*   hab_fraction_input = element->GetInput2(CalvingHabFractionEnum); _assert_(hab_fraction_input);
+			Input2*   ls_input           = element->GetInput2(DistanceToCalvingfrontEnum); _assert_(ls_input);
 
 			/*Potentially constrain nodes of this element*/
@@ -793,5 +736,5 @@
 
 		/*Get the DistanceToCalvingfront*/
-		femmodel->elements->InputDuplicate(MaskIceLevelsetEnum,DistanceToCalvingfrontEnum);
+		InputDuplicatex(femmodel,MaskIceLevelsetEnum,DistanceToCalvingfrontEnum);
 		femmodel->DistanceToFieldValue(MaskIceLevelsetEnum,0,DistanceToCalvingfrontEnum);
 
@@ -803,9 +746,9 @@
 			int      numnodes              = element->GetNumberOfNodes();
 			Gauss*   gauss                 = element->NewGauss();
-			Input*   crevassedepth_input   = element->GetInput(CrevasseDepthEnum); _assert_(crevassedepth_input);
-			Input*   bed_input             = element->GetInput(BedEnum); _assert_(bed_input);
-			Input*   surface_crevasse_input = element->GetInput(SurfaceCrevasseEnum); _assert_(surface_crevasse_input);
-			Input*   thickness_input       = element->GetInput(ThicknessEnum); _assert_(thickness_input);
-			Input*   surface_input         = element->GetInput(SurfaceEnum); _assert_(surface_input);
+			Input2*   crevassedepth_input   = element->GetInput2(CrevasseDepthEnum); _assert_(crevassedepth_input);
+			Input2*   bed_input             = element->GetInput2(BedEnum); _assert_(bed_input);
+			Input2*   surface_crevasse_input = element->GetInput2(SurfaceCrevasseEnum); _assert_(surface_crevasse_input);
+			Input2*   thickness_input       = element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+			Input2*   surface_input         = element->GetInput2(SurfaceEnum); _assert_(surface_input);
 
 			/*First, look at ice front and figure out if any of the nodes will be calved*/
@@ -841,10 +784,10 @@
 				int      numnodes               = element->GetNumberOfNodes();
 				Gauss*   gauss                  = element->NewGauss();
-				Input*   levelset_input         = element->GetInput(DistanceToCalvingfrontEnum); _assert_(levelset_input);
-				Input*   crevassedepth_input    = element->GetInput(CrevasseDepthEnum); _assert_(crevassedepth_input);
-				Input*   bed_input              = element->GetInput(BedEnum); _assert_(bed_input);
-				Input*   surface_crevasse_input = element->GetInput(SurfaceCrevasseEnum); _assert_(surface_crevasse_input);
-				Input*   thickness_input        = element->GetInput(ThicknessEnum); _assert_(thickness_input);
-				Input*   surface_input          = element->GetInput(SurfaceEnum); _assert_(surface_input);
+				Input2*   levelset_input         = element->GetInput2(DistanceToCalvingfrontEnum); _assert_(levelset_input);
+				Input2*   crevassedepth_input    = element->GetInput2(CrevasseDepthEnum); _assert_(crevassedepth_input);
+				Input2*   bed_input              = element->GetInput2(BedEnum); _assert_(bed_input);
+				Input2*   surface_crevasse_input = element->GetInput2(SurfaceCrevasseEnum); _assert_(surface_crevasse_input);
+				Input2*   thickness_input        = element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+				Input2*   surface_input          = element->GetInput2(SurfaceEnum); _assert_(surface_input);
 
 				/*Is this element connected to a node that should be calved*/
Index: /issm/trunk/src/c/analyses/LevelsetAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/LevelsetAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/LevelsetAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
@@ -26,6 +26,4 @@
 		ElementMatrix* CreateKMatrix(Element* element);
 		ElementVector* CreatePVector(Element* element);
-		void           GetB(IssmDouble* B,Element* element,IssmDouble* xyz_list,Gauss* gauss);
-		void           GetBprime(IssmDouble* Bprime,Element* element,IssmDouble* xyz_list,Gauss* gauss);
 		IssmDouble     GetDistanceToStraight(IssmDouble* q, IssmDouble* s0, IssmDouble* s1);
 		void           GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element);
Index: /issm/trunk/src/c/analyses/LoveAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/LoveAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/LoveAnalysis.cpp	(revision 24686)
@@ -15,5 +15,5 @@
 	_error_("not needed!");
 }/*}}}*/
-void LoveAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void LoveAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/LoveAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/LoveAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/LoveAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/MasstransportAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/MasstransportAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/MasstransportAnalysis.cpp	(revision 24686)
@@ -111,5 +111,5 @@
 	return 1;
 }/*}}}*/
-void MasstransportAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void MasstransportAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int    stabilization,finiteelement;
@@ -140,23 +140,23 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
-	if(isgroundingline) 	iomodel->FetchDataToInput(elements,"md.geometry.bed",BedEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
+	if(isgroundingline) 	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.bed",BedEnum);
 	/*Initialize cumdeltalthickness input*/
-	InputUpdateFromConstantx(elements,0.,SealevelriseCumDeltathicknessEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,SealevelriseCumDeltathicknessEnum);
 	/*Initialize ThicknessResidual input*/
-	InputUpdateFromConstantx(elements,0.,ThicknessResidualEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,ThicknessResidualEnum);
 
 	/*Get what we need for ocean-induced basal melting*/
@@ -165,7 +165,8 @@
 	switch(basalforcing_model){
 		case FloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
 			break;
 		case LinearFloatingMeltRateEnum:
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.perturbation_melting_rate",BasalforcingsPerturbationMeltingRateEnum,0.);
 			break;
 		case MismipFloatingMeltRateEnum:
@@ -174,14 +175,15 @@
 			break;
 		case SpatialLinearFloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.deepwater_melting_rate",BasalforcingsDeepwaterMeltingRateEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.deepwater_elevation",BasalforcingsDeepwaterElevationEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.upperwater_elevation",BasalforcingsUpperwaterElevationEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.deepwater_melting_rate",BasalforcingsDeepwaterMeltingRateEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.deepwater_elevation",BasalforcingsDeepwaterElevationEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.upperwater_elevation",BasalforcingsUpperwaterElevationEnum);
 			break;
 		case BasalforcingsPicoEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.basin_id",BasalforcingsPicoBasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.basin_id",BasalforcingsPicoBasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.overturning_coeff",BasalforcingsPicoOverturningCoeffEnum);
 			break;
 		case BasalforcingsIsmip6Enum:{
-			iomodel->FetchDataToInput(elements,"md.basalforcings.basin_id",BasalforcingsIsmip6BasinIdEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.melt_anomaly",BasalforcingsIsmip6MeltAnomalyEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.basin_id",BasalforcingsIsmip6BasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.melt_anomaly",BasalforcingsIsmip6MeltAnomalyEnum,0.);
 			IssmDouble** array3d = NULL; int* Ms = NULL; int* Ns = NULL; int K;
 			iomodel->FetchData(&array3d,&Ms,&Ns,&K,"md.basalforcings.tf");
@@ -191,5 +193,5 @@
 				if(iomodel->domaintype!=Domain2DhorizontalEnum && !element->IsOnBase()) continue;
 				for(int kk=0;kk<K;kk++){
-					element->DatasetInputAdd(BasalforcingsIsmip6TfEnum,array3d[kk],iomodel,Ms[kk],Ns[kk],1,BasalforcingsIsmip6TfEnum,7,kk);
+					element->DatasetInputAdd(BasalforcingsIsmip6TfEnum,array3d[kk],inputs2,iomodel,Ms[kk],Ns[kk],1,BasalforcingsIsmip6TfEnum,7,kk);
 				}
 			}
@@ -200,6 +202,6 @@
 			break;
 		case BeckmannGoosseFloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.ocean_salinity",BasalforcingsOceanSalinityEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.ocean_temp",BasalforcingsOceanTempEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.ocean_salinity",BasalforcingsOceanSalinityEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.ocean_temp",BasalforcingsOceanTempEnum);
 			break;
 		default:
@@ -208,16 +210,16 @@
 
 	if(!issmb){
-		iomodel->FetchDataToInput(elements,"md.smb.mass_balance",SmbMassBalanceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.smb.mass_balance",SmbMassBalanceEnum);
 	}
 	if(stabilization==3){
-		iomodel->FetchDataToInput(elements,"md.masstransport.spcthickness",MasstransportSpcthicknessEnum); //for DG, we need the spc in the element
+		iomodel->FetchDataToInput(inputs2,elements,"md.masstransport.spcthickness",MasstransportSpcthicknessEnum); //for DG, we need the spc in the element
 	}
 	if(stabilization==4){
-		iomodel->FetchDataToInput(elements,"md.masstransport.spcthickness",MasstransportSpcthicknessEnum); //for FCT, we need the spc in the element (penlaties)
+		iomodel->FetchDataToInput(inputs2,elements,"md.masstransport.spcthickness",MasstransportSpcthicknessEnum); //for FCT, we need the spc in the element (penalties)
 	}
 
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 
@@ -291,4 +293,5 @@
 	IssmDouble xi,tau;
 	IssmDouble dvx[2],dvy[2];
+	IssmDouble D[4];
 	IssmDouble* xyz_list = NULL;
 
@@ -309,7 +312,4 @@
 	IssmDouble*    basis  = xNew<IssmDouble>(numnodes);
 	IssmDouble*		dbasis = xNew<IssmDouble>(dim*numnodes);
-	IssmDouble*    B      = xNew<IssmDouble>(dim*numnodes);
-	IssmDouble*    Bprime = xNew<IssmDouble>(dim*numnodes);
-	IssmDouble*    D      = xNewZeroInit<IssmDouble>(dim*dim);
 
 	/*Retrieve all inputs and parameters*/
@@ -318,8 +318,8 @@
 	element->FindParam(&domaintype,DomainTypeEnum);
 	element->FindParam(&stabilization,MasstransportStabilizationEnum);
-	Input* vxaverage_input=element->GetInput(VxAverageEnum); _assert_(vxaverage_input);
-	Input* vyaverage_input=NULL;
+	Input2* vxaverage_input=element->GetInput2(VxAverageEnum); _assert_(vxaverage_input);
+	Input2* vyaverage_input=NULL;
 	if(dim==2){
-		vyaverage_input=element->GetInput(VyAverageEnum); _assert_(vyaverage_input);
+		vyaverage_input=element->GetInput2(VyAverageEnum); _assert_(vyaverage_input);
 	}
 
@@ -333,41 +333,43 @@
 		element->JacobianDeterminant(&Jdet,xyz_list,gauss);
 		element->NodalFunctions(basis,gauss);
-
+		element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
+
+		/*Transient term*/
+		D_scalar=gauss->weight*Jdet;
+		for(int i=0;i<numnodes;i++){
+			for(int j=0;j<numnodes;j++){
+				Ke->values[i*numnodes+j] += D_scalar*basis[i]*basis[j];
+			}
+		}
+
+		/*Advection terms*/
 		vxaverage_input->GetInputValue(&vx,gauss);
 		vxaverage_input->GetInputDerivativeValue(&dvx[0],xyz_list,gauss);
+		D_scalar=dt*gauss->weight*Jdet;
 		if(dim==2){
 			vyaverage_input->GetInputValue(&vy,gauss);
 			vyaverage_input->GetInputDerivativeValue(&dvy[0],xyz_list,gauss);
-		}
-
-		D_scalar=gauss->weight*Jdet;
-		TripleMultiply(basis,1,numnodes,1,
-					&D_scalar,1,1,0,
-					basis,1,numnodes,0,
-					&Ke->values[0],1);
-
-		GetB(B,element,dim,xyz_list,gauss);
-		GetBprime(Bprime,element,dim,xyz_list,gauss);
-
-		dvxdx=dvx[0];
-		if(dim==2) dvydy=dvy[1];
-		D_scalar=dt*gauss->weight*Jdet;
-
-		D[0*dim+0]=D_scalar*dvxdx;
-		if(dim==2) D[1*dim+1]=D_scalar*dvydy;
-
-		TripleMultiply(B,dim,numnodes,1,
-					D,dim,dim,0,
-					B,dim,numnodes,0,
-					&Ke->values[0],1);
-
-		D[0*dim+0]=D_scalar*vx;
-		if(dim==2) D[1*dim+1]=D_scalar*vy;
-
-		TripleMultiply(B,dim,numnodes,1,
-					D,dim,dim,0,
-					Bprime,dim,numnodes,0,
-					&Ke->values[0],1);
-
+			dvxdx=dvx[0];
+			dvydy=dvy[1];
+			for(int i=0;i<numnodes;i++){
+				for(int j=0;j<numnodes;j++){
+					/*\phi_i \phi_j \nabla\cdot v*/
+					Ke->values[i*numnodes+j] += D_scalar*basis[i]*basis[j]*(dvxdx+dvydy);
+					/*\phi_i v\cdot\nabla\phi_j*/
+					Ke->values[i*numnodes+j] += D_scalar*basis[i]*(vx*dbasis[0*numnodes+j] + vy*dbasis[1*numnodes+j]);
+				}
+			}
+		}
+		else{
+			dvxdx=dvx[0];
+			for(int i=0;i<numnodes;i++){
+				for(int j=0;j<numnodes;j++){
+					Ke->values[i*numnodes+j] += D_scalar*dvxdx*dbasis[0*numnodes+i]*dbasis[0*numnodes+j];
+					Ke->values[i*numnodes+j] += D_scalar*vx*dbasis[0*numnodes+j]*basis[i];
+				}
+			}
+		}
+
+		for(int i=0;i<4;i++) D[i]=0.;
 		switch(stabilization){
 			case 0:
@@ -383,5 +385,4 @@
 			case 2:
 				/*Streamline upwinding*/
-				element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
 				vxaverage_input->GetInputAverage(&vx);
 				if(dim==1){
@@ -397,5 +398,4 @@
 				/*SUPG*/
 				if(dim!=2) _error_("Stabilization "<<stabilization<<" not supported yet for dim != 2");
-				element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
 				vxaverage_input->GetInputAverage(&vx);
 				vyaverage_input->GetInputAverage(&vy);
@@ -418,11 +418,25 @@
 			}
 
-			TripleMultiply(Bprime,dim,numnodes,1,
-						D,dim,dim,0,
-						Bprime,dim,numnodes,0,
-						&Ke->values[0],1);
+			if(dim==2){
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j] += (
+									dbasis[0*numnodes+i] *(D[0*dim+0]*dbasis[0*numnodes+j] + D[0*dim+1]*dbasis[1*numnodes+j]) +
+									dbasis[1*numnodes+i] *(D[1*dim+0]*dbasis[0*numnodes+j] + D[1*dim+1]*dbasis[1*numnodes+j]) 
+									);
+					}
+				}
+			}
+			else{
+				for(int i=0;i<numnodes;i++){
+					for(int j=0;j<numnodes;j++){
+						Ke->values[i*numnodes+j] += D_scalar*h/(2.*vel)*dbasis[0*numnodes+i] *D[0]*dbasis[0*numnodes+j];
+					}
+				}
+			}
 		}
 		if(stabilization==2){
 			/*Streamline upwind*/
+			_assert_(dim==2);
 			for(int i=0;i<numnodes;i++){
 				for(int j=0;j<numnodes;j++){
@@ -477,7 +491,4 @@
 	xDelete<IssmDouble>(basis);
 	xDelete<IssmDouble>(dbasis);
-	xDelete<IssmDouble>(B);
-	xDelete<IssmDouble>(Bprime);
-	xDelete<IssmDouble>(D);
 	delete gauss;
 	return Ke;
@@ -499,7 +510,5 @@
 	ElementMatrix* Ke     = element->NewElementMatrix();
 	IssmDouble*    basis  = xNew<IssmDouble>(numnodes);
-	IssmDouble*    B      = xNew<IssmDouble>(2*numnodes);
-	IssmDouble*    Bprime = xNew<IssmDouble>(2*numnodes);
-	IssmDouble     D[2][2];
+	IssmDouble*    dbasis = xNew<IssmDouble>(3*numnodes);
 
 	/*Retrieve all inputs and parameters*/
@@ -507,6 +516,6 @@
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
 	element->FindParam(&domaintype,DomainTypeEnum);
-	Input* vxaverage_input=element->GetInput(VxAverageEnum); _assert_(vxaverage_input);
-	Input* vyaverage_input=element->GetInput(VyAverageEnum); _assert_(vyaverage_input);
+	Input2* vxaverage_input=element->GetInput2(VxAverageEnum); _assert_(vxaverage_input);
+	Input2* vyaverage_input=element->GetInput2(VyAverageEnum); _assert_(vyaverage_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -517,4 +526,5 @@
 		element->JacobianDeterminant(&Jdet,xyz_list,gauss);
 		element->NodalFunctions(basis,gauss);
+		element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
 
 		vxaverage_input->GetInputValue(&vx,gauss);
@@ -522,23 +532,17 @@
 
 		D_scalar=gauss->weight*Jdet;
-		TripleMultiply(basis,1,numnodes,1,
-					&D_scalar,1,1,0,
-					basis,1,numnodes,0,
-					&Ke->values[0],1);
-
-		/*WARNING: B and Bprime are inverted compared to CG*/
-		GetB(Bprime,element,2,xyz_list,gauss);
-		GetBprime(B,element,2,xyz_list,gauss);
-
+		for(int i=0;i<numnodes;i++){
+			for(int j=0;j<numnodes;j++){
+				Ke->values[i*numnodes+j] += D_scalar*basis[i]*basis[j];
+			}
+		}
+
+		/*WARNING: basis and dbasis are inverted compared to CG*/
 		D_scalar = - dt*gauss->weight*Jdet;
-		D[0][0]  = D_scalar*vx;
-		D[0][1]  = 0.;
-		D[1][0]  = 0.;
-		D[1][1]  = D_scalar*vy;
-		TripleMultiply(B,2,numnodes,1,
-					&D[0][0],2,2,0,
-					Bprime,2,numnodes,0,
-					&Ke->values[0],1);
-
+		for(int i=0;i<numnodes;i++){
+			for(int j=0;j<numnodes;j++){
+				Ke->values[i*numnodes+j] += D_scalar*(vx*dbasis[0*numnodes+i]*basis[j] + vy*dbasis[1*numnodes+i]*basis[j]);
+			}
+		}
 	}
 
@@ -546,6 +550,5 @@
 	xDelete<IssmDouble>(xyz_list);
 	xDelete<IssmDouble>(basis);
-	xDelete<IssmDouble>(B);
-	xDelete<IssmDouble>(Bprime);
+	xDelete<IssmDouble>(dbasis);
 	delete gauss;
 	return Ke;
@@ -617,11 +620,16 @@
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
 	element->FindParam(&stabilization,MasstransportStabilizationEnum);
-	Input* gmb_input        = element->GetInput(BasalforcingsGroundediceMeltingRateEnum);  _assert_(gmb_input);
-	Input* fmb_input        = element->GetInput(BasalforcingsFloatingiceMeltingRateEnum);  _assert_(fmb_input);
-	Input* gllevelset_input = element->GetInput(MaskGroundediceLevelsetEnum);              _assert_(gllevelset_input);
-	Input* ms_input         = element->GetInput(SmbMassBalanceEnum);                       _assert_(ms_input);
-	Input* thickness_input  = element->GetInput(ThicknessEnum);                            _assert_(thickness_input);
-	Input* vxaverage_input  = element->GetInput(VxAverageEnum);										_assert_(vxaverage_input);
-	Input* vyaverage_input  = element->GetInput(VyAverageEnum);										_assert_(vyaverage_input);
+	Input2* gmb_input        = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum);  _assert_(gmb_input);
+	Input2* fmb_input        = element->GetInput2(BasalforcingsFloatingiceMeltingRateEnum);  _assert_(fmb_input);
+	Input2* gllevelset_input = element->GetInput2(MaskGroundediceLevelsetEnum);              _assert_(gllevelset_input);
+	Input2* ms_input         = element->GetInput2(SmbMassBalanceEnum);                       _assert_(ms_input);
+	Input2* thickness_input  = element->GetInput2(ThicknessEnum);                            _assert_(thickness_input);
+	Input2* vxaverage_input  = element->GetInput2(VxAverageEnum);										_assert_(vxaverage_input);
+	Input2* vyaverage_input  = element->GetInput2(VyAverageEnum);										_assert_(vyaverage_input);
+
+//	if(element->Id()==9){
+//		gmb_input->Echo();
+//		_error_("S");
+//	}
 
 	h=element->CharacteristicLength();
@@ -726,9 +734,9 @@
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
 	element->FindParam(&melt_style,GroundinglineMeltInterpolationEnum);
-	Input* gmb_input        = element->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(gmb_input);
-	Input* fmb_input        = element->GetInput(BasalforcingsFloatingiceMeltingRateEnum); _assert_(fmb_input);
-	Input* ms_input         = element->GetInput(SmbMassBalanceEnum);                      _assert_(ms_input);
-	Input* gllevelset_input = element->GetInput(MaskGroundediceLevelsetEnum);             _assert_(gllevelset_input);
-	Input* thickness_input  = element->GetInput(ThicknessEnum);                           _assert_(thickness_input);
+	Input2* gmb_input        = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(gmb_input);
+	Input2* fmb_input        = element->GetInput2(BasalforcingsFloatingiceMeltingRateEnum); _assert_(fmb_input);
+	Input2* ms_input         = element->GetInput2(SmbMassBalanceEnum);                      _assert_(ms_input);
+	Input2* gllevelset_input = element->GetInput2(MaskGroundediceLevelsetEnum);             _assert_(gllevelset_input);
+	Input2* thickness_input  = element->GetInput2(ThicknessEnum);                           _assert_(thickness_input);
 
    /*Recover portion of element that is grounded*/
@@ -783,61 +791,4 @@
 	return pe;
 }/*}}}*/
-void           MasstransportAnalysis::GetB(IssmDouble* B,Element* element,int dim,IssmDouble* xyz_list,Gauss* gauss){/*{{{*/
-	/*Compute B  matrix. B=[B1 B2 B3] where Bi is of size 3*NDOF2.
-	 * For node i, Bi can be expressed in the actual coordinate system
-	 * by:
-	 *       Bi=[ N ]
-	 *          [ N ]
-	 * where N is the finiteelement function for node i.
-	 *
-	 * We assume B_prog has been allocated already, of size: 2x(NDOF1*numnodes)
-	 */
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Get nodal functions*/
-	IssmDouble* basis=xNew<IssmDouble>(numnodes);
-	element->NodalFunctions(basis,gauss);
-
-	/*Build B: */
-	for(int i=0;i<numnodes;i++){
-		for(int j=0;j<dim;j++){
-			B[numnodes*j+i] = basis[i];
-		}
-	}
-
-	/*Clean-up*/
-	xDelete<IssmDouble>(basis);
-}/*}}}*/
-void           MasstransportAnalysis::GetBprime(IssmDouble* Bprime,Element* element,int dim,IssmDouble* xyz_list,Gauss* gauss){/*{{{*/
-	/*Compute B'  matrix. B'=[B1' B2' B3'] where Bi' is of size 3*NDOF2.
-	 * For node i, Bi' can be expressed in the actual coordinate system
-	 * by:
-	 *       Bi_prime=[ dN/dx ]
-	 *                [ dN/dy ]
-	 * where N is the finiteelement function for node i.
-	 *
-	 * We assume B' has been allocated already, of size: 3x(NDOF2*numnodes)
-	 */
-
-	/*Fetch number of nodes for this finite element*/
-	int numnodes = element->GetNumberOfNodes();
-
-	/*Get nodal functions derivatives*/
-	IssmDouble* dbasis=xNew<IssmDouble>(dim*numnodes);
-	element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
-
-	/*Build B': */
-	for(int i=0;i<numnodes;i++){
-		for(int j=0;j<dim;j++){
-			Bprime[numnodes*j+i] = dbasis[j*numnodes+i];
-		}
-	}
-
-	/*Clean-up*/
-	xDelete<IssmDouble>(dbasis);
-
-}/*}}}*/
 void           MasstransportAnalysis::GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element){/*{{{*/
 	element->GetSolutionFromInputsOneDof(solution,ThicknessEnum);
@@ -872,6 +823,6 @@
 		}
 	}
-	element->AddBasalInput(ThicknessEnum,newthickness,element->GetElementType());
-	element->AddBasalInput(ThicknessResidualEnum,thicknessresidual,element->GetElementType());
+	element->AddBasalInput2(ThicknessEnum,newthickness,element->GetElementType());
+	element->AddBasalInput2(ThicknessResidualEnum,thicknessresidual,element->GetElementType());
 	
 	xDelete<int>(doflist);
@@ -905,9 +856,9 @@
 	basalelement->GetInputListOnVertices(&newthickness[0],ThicknessEnum);
 	basalelement->GetInputListOnVertices(&oldthickness[0],ThicknessOldEnum);
-	basalelement->GetInputListOnVertices(&oldbase[0],BaseEnum);
-	basalelement->GetInputListOnVertices(&oldsurface[0],SurfaceEnum);
+	basalelement->GetInputListOnVertices(&oldbase[0],BaseOldEnum);
+	basalelement->GetInputListOnVertices(&oldsurface[0],SurfaceOldEnum);
 	basalelement->GetInputListOnVertices(&phi[0],MaskGroundediceLevelsetEnum);
 	basalelement->GetInputListOnVertices(&sealevel[0],SealevelEnum);
-	basalelement->GetInputListOnVertices(&cumdeltathickness[0],SealevelriseCumDeltathicknessEnum);
+	basalelement->GetInputListOnVertices(&cumdeltathickness[0],SealevelriseCumDeltathicknessOldEnum);
 
 	/*Do we do grounding line migration?*/
@@ -953,8 +904,8 @@
 
 	/*Add input to the element: */
-	element->AddBasalInput(SurfaceEnum,newsurface,P1Enum);
-	element->AddBasalInput(BaseEnum,newbase,P1Enum);
-	element->AddBasalInput(SealevelriseCumDeltathicknessEnum,cumdeltathickness,P1Enum);
-	element->AddBasalInput(SealevelriseDeltathicknessEnum,deltathickness,P1Enum);
+	element->AddBasalInput2(SurfaceEnum,newsurface,P1Enum);
+	element->AddBasalInput2(BaseEnum,newbase,P1Enum);
+	element->AddBasalInput2(SealevelriseCumDeltathicknessEnum,cumdeltathickness,P1Enum);
+	element->AddBasalInput2(SealevelriseDeltathicknessEnum,deltathickness,P1Enum);
 
 	/*Free ressources:*/
@@ -984,5 +935,5 @@
 
 	/*Intermediaries */
-	IssmDouble Jdet;
+	IssmDouble Jdet,D_scalar;
 	IssmDouble vx,vy;
 	IssmDouble* xyz_list = NULL;
@@ -994,12 +945,12 @@
 	/*Initialize Element vector and other vectors*/
 	ElementMatrix* Ke     = element->NewElementMatrix();
-	IssmDouble*    B      = xNew<IssmDouble>(dim*numnodes);
-	IssmDouble*    Bprime = xNew<IssmDouble>(dim*numnodes);
+	IssmDouble*    basis  = xNew<IssmDouble>(numnodes);
+	IssmDouble*    dbasis = xNew<IssmDouble>(dim*numnodes);
 	IssmDouble*    D      = xNewZeroInit<IssmDouble>(dim*dim);
 
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vxaverage_input=element->GetInput(VxEnum); _assert_(vxaverage_input);
-	Input* vyaverage_input=element->GetInput(VyEnum); _assert_(vyaverage_input);
+	Input2* vxaverage_input=element->GetInput2(VxEnum); _assert_(vxaverage_input);
+	Input2* vyaverage_input=element->GetInput2(VyEnum); _assert_(vyaverage_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1009,24 +960,21 @@
 
 		element->JacobianDeterminant(&Jdet,xyz_list,gauss);
-		GetB(B,element,dim,xyz_list,gauss);
-		GetBprime(Bprime,element,dim,xyz_list,gauss);
+		element->NodalFunctions(basis,gauss);
+		element->NodalFunctionsDerivatives(dbasis,xyz_list,gauss);
 		vxaverage_input->GetInputValue(&vx,gauss);
 		vyaverage_input->GetInputValue(&vy,gauss);
 
-		D[0*dim+0] = -gauss->weight*vx*Jdet;
-		D[1*dim+1] = -gauss->weight*vy*Jdet;
-
-		TripleMultiply(B,dim,numnodes,1,
-					D,dim,dim,0,
-					Bprime,dim,numnodes,0,
-					&Ke->values[0],1);
-
+		D_scalar = gauss->weight*Jdet;
+		for(int i=0;i<numnodes;i++){
+			for(int j=0;j<numnodes;j++){
+				Ke->values[i*numnodes+j] += -D_scalar*(vx*dbasis[0*numnodes+j]*basis[i] + vy*dbasis[1*numnodes+j]*basis[i]);
+			}
+		}
 	}
 
 	/*Clean up and return*/
 	xDelete<IssmDouble>(xyz_list);
-	xDelete<IssmDouble>(B);
-	xDelete<IssmDouble>(Bprime);
-	xDelete<IssmDouble>(D);
+	xDelete<IssmDouble>(basis);
+	xDelete<IssmDouble>(dbasis);
 	delete gauss;
 	return Ke;
Index: /issm/trunk/src/c/analyses/MasstransportAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/MasstransportAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/MasstransportAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
@@ -30,6 +30,4 @@
 		ElementVector* CreatePVectorCG(Element* element);
 		ElementVector* CreatePVectorDG(Element* element);
-		void           GetB(IssmDouble* B,Element* element,int dim,IssmDouble* xyz_list,Gauss* gauss);
-		void           GetBprime(IssmDouble* B,Element* element,int dim,IssmDouble* xyz_list,Gauss* gauss);
 		void           GetSolutionFromInputs(Vector<IssmDouble>* solution,Element* element);
 		void           GradientJ(Vector<IssmDouble>* gradient,Element* element,int control_type,int control_index);
Index: /issm/trunk/src/c/analyses/MeltingAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/MeltingAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/MeltingAnalysis.cpp	(revision 24686)
@@ -37,5 +37,5 @@
 	return 1;
 }/*}}}*/
-void MeltingAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void MeltingAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Now, is the model 3d? otherwise, do nothing: */
@@ -47,5 +47,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -53,14 +53,14 @@
 
 	/*Create inputs: */
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
-	iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
 }/*}}}*/
 void MeltingAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
Index: /issm/trunk/src/c/analyses/MeltingAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/MeltingAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/MeltingAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/SealevelriseAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/SealevelriseAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/SealevelriseAnalysis.cpp	(revision 24686)
@@ -2,4 +2,5 @@
 #include "../toolkits/toolkits.h"
 #include "../classes/classes.h"
+#include "../classes/Inputs2/TransientInput2.h"
 #include "../shared/shared.h"
 #include "../modules/modules.h"
@@ -18,7 +19,8 @@
 	return 1;
 }/*}}}*/
-void SealevelriseAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void SealevelriseAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int geodetic=0;
+	int dslmodel=0;
 
 	/*Update elements: */
@@ -27,5 +29,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -33,6 +35,6 @@
 
 	/*Create inputs: */
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
 	//those only if we have requested geodetic computations:
 	iomodel->FetchData(&geodetic,"md.slr.geodetic");
@@ -41,25 +43,243 @@
 		iomodel->FetchData(&masktype,"md.mask.type");
 		if (strcmp(masktype,"maskpsl")==0){
-			iomodel->FetchDataToInput(elements,"md.mask.ocean_levelset",MaskOceanLevelsetEnum);
-			iomodel->FetchDataToInput(elements,"md.mask.land_levelset",MaskLandLevelsetEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.mask.ocean_levelset",MaskOceanLevelsetEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.mask.land_levelset",MaskLandLevelsetEnum);
 		}
 		xDelete<char>(masktype);
 	}
-	iomodel->FetchDataToInput(elements,"md.slr.deltathickness",SealevelriseDeltathicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.spcthickness",SealevelriseSpcthicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.geometry.bed",BedEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.Ngia",SealevelNGiaRateEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.Ugia",SealevelUGiaRateEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.steric_rate",SealevelriseStericRateEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.hydro_rate",SealevelriseHydroRateEnum);
-
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.deltathickness",SealevelriseDeltathicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.spcthickness",SealevelriseSpcthicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.bed",BedEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.Ngia",SealevelNGiaRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.Ugia",SealevelUGiaRateEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.hydro_rate",SealevelriseHydroRateEnum);
+		
+	/*dynamic sea level: */
+	iomodel->FetchData(&dslmodel,"md.dsl.model");
+	if (dslmodel==1){ /*standard dsl model:{{{*/
+
+		/*deal with global mean steric rate: */
+		IssmDouble* str=NULL; 
+		IssmDouble* times = NULL;
+		int M,N;
+
+		/*fetch str vector:*/
+		iomodel->FetchData(&str,&M,&N,"md.dsl.global_average_thermosteric_sea_level_change"); _assert_(M==2);
+		
+		//recover time vector: 
+		times=xNew<IssmDouble>(N);
+		for(int t=0;t<N;t++) times[t] = str[N+t];
+
+		/*create transient input: */
+		inputs2->SetTransientInput(DslGlobalAverageThermostericSeaLevelChangeEnum,times,N);
+		TransientInput2* transientinput = inputs2->GetTransientInput(DslGlobalAverageThermostericSeaLevelChangeEnum);
+		
+			
+		for(int i=0;i<elements->Size();i++){
+			Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
+
+			for(int t=0;t<N;t++){
+				switch(element->ObjectEnum()){
+					case TriaEnum:  transientinput->AddTriaTimeInput( t,1,&element->lid,&str[t],P0Enum); break;
+					case PentaEnum: transientinput->AddPentaTimeInput(t,1,&element->lid,&str[t],P0Enum); break;
+					default: _error_("Not implemented yet");
+				}
+			}
+		}
+
+		/*cleanup:*/
+		xDelete<IssmDouble>(times);
+		iomodel->DeleteData(str,"md.dsl.global_average_thermosteric_sea_level_change");
+
+		/*deal with dynamic sea level fields: */
+		iomodel->FetchDataToInput(inputs2,elements,"md.dsl.sea_surface_height_change_above_geoid", DslSeaSurfaceHeightChangeAboveGeoidEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.dsl.sea_water_pressure_change_at_sea_floor", DslSeaWaterPressureChangeAtSeaFloor);
+		
+	} /*}}}*/
+	else if (dslmodel==2){ /*multi-model ensemble dsl model:{{{*/
+	
+		/*variables:*/
+		int nummodels;
+		IssmDouble** pstr=NULL; 
+		IssmDouble*  str=NULL;
+		IssmDouble*  times = NULL;
+		int* pM = NULL;
+		int* pN = NULL;
+		int M,N;
+
+		/*deal with dsl.global_average_thermosteric_sea_level_change {{{*/
+		iomodel->FetchData(&pstr,&pM,&pN,&nummodels,"md.dsl.global_average_thermosteric_sea_level_change");
+
+		/*go through the mat array and create a dataset of transient inputs:*/
+		for (int i=0;i<nummodels;i++){
+
+			M=pM[i];
+			N=pN[i];
+			str=pstr[i];
+
+			//recover time vector: 
+			times=xNew<IssmDouble>(N);
+			for(int t=0;t<N;t++) times[t] = str[(M-1)*N+t];
+
+			TransientInput2* transientinput=inputs2->SetDatasetTransientInput(DslGlobalAverageThermostericSeaLevelChangeEnum,i, times,N);
+			
+			for(int j=0;j<elements->Size();j++){
+				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(j));
+
+				for(int t=0;t<N;t++){
+					switch(element->ObjectEnum()){
+						case TriaEnum:  transientinput->AddTriaTimeInput( t,1,&element->lid,&str[t],P0Enum); break;
+						case PentaEnum: transientinput->AddPentaTimeInput(t,1,&element->lid,&str[t],P0Enum); break;
+						default: _error_("Not implemented yet");
+					}
+				}
+			}
+			xDelete<IssmDouble>(times);
+		}
+		/*Delete data:*/
+		for(int i=0;i<nummodels;i++){
+			IssmDouble* str=pstr[i];
+			xDelete<IssmDouble>(str);
+		}
+		xDelete<IssmDouble*>(pstr);
+		xDelete<int>(pM);
+		xDelete<int>(pN);
+		/*}}}*/
+		/*now do the same with the dsl.sea_surface_height_change_above_geoid field:{{{ */
+		iomodel->FetchData(&pstr,&pM,&pN,&nummodels,"md.dsl.sea_surface_height_change_above_geoid");
+
+		for (int i=0;i<nummodels;i++){
+			M=pM[i];
+			N=pN[i];
+			str=pstr[i];
+		
+
+			//recover time vector: 
+			times=xNew<IssmDouble>(N);
+			for(int t=0;t<N;t++) times[t] = str[(M-1)*N+t];
+			
+			TransientInput2* transientinput=inputs2->SetDatasetTransientInput(DslSeaSurfaceHeightChangeAboveGeoidEnum,i, times,N);
+	
+			for(int j=0;j<elements->Size();j++){
+
+				/*Get the right transient input*/
+				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(j));
+
+				/*Get values and lid list*/
+				const int   numvertices = element->GetNumberOfVertices();
+				int        *vertexlids = xNew<int>(numvertices);
+				int        *vertexsids = xNew<int>(numvertices);
+
+
+				/*Recover vertices ids needed to initialize inputs*/
+				_assert_(iomodel->elements);
+				for(int k=0;k<numvertices;k++){
+					vertexsids[k] =reCast<int>(iomodel->elements[numvertices*element->Sid()+k]); //ids for vertices are in the elements array from Matlab
+					vertexlids[k]=iomodel->my_vertices_lids[vertexsids[k]-1];
+				}
+
+				//element->GetVerticesLidList(vertexlids);
+				//element->GetVerticesSidList(vertexsids);
+				IssmDouble* values=xNew<IssmDouble>(numvertices);
+
+				for(int t=0;t<N;t++){
+					for (int k=0;k<numvertices;k++)values[k]=str[N*vertexsids[k]+t];
+
+					switch(element->ObjectEnum()){
+						case TriaEnum:  transientinput->AddTriaTimeInput( t,numvertices,vertexlids,values,P1Enum); break;
+						case PentaEnum: transientinput->AddPentaTimeInput(t,numvertices,vertexlids,values,P1Enum); break;
+						default: _error_("Not implemented yet");
+					}
+				}
+				xDelete<IssmDouble>(values);
+				xDelete<int>(vertexlids);
+				xDelete<int>(vertexsids);
+			}
+			
+			xDelete<IssmDouble>(times);
+		}
+		
+		/*Delete data:*/
+		for(int i=0;i<nummodels;i++){
+			IssmDouble* str=pstr[i];
+			xDelete<IssmDouble>(str);
+		}
+		xDelete<IssmDouble*>(pstr);
+		xDelete<int>(pM);
+		xDelete<int>(pN);
+		/*}}}*/
+		/*now do the same with the dsl.sea_water_pressure_change_at_sea_floor field:{{{ */
+		iomodel->FetchData(&pstr,&pM,&pN,&nummodels,"md.dsl.sea_water_pressure_change_at_sea_floor");
+
+		for (int i=0;i<nummodels;i++){
+			M=pM[i];
+			N=pN[i];
+			str=pstr[i];
+
+			//recover time vector: 
+			times=xNew<IssmDouble>(N);
+			for(int t=0;t<N;t++) times[t] = str[(M-1)*N+t];
+
+			TransientInput2* transientinput=inputs2->SetDatasetTransientInput(DslSeaWaterPressureChangeAtSeaFloor,i, times,N);
+	
+			for(int j=0;j<elements->Size();j++){
+
+				/*Get the right transient input*/
+				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(j));
+
+				/*Get values and lid list*/
+				const int   numvertices = element->GetNumberOfVertices();
+				int        *vertexlids = xNew<int>(numvertices);
+				int        *vertexsids = xNew<int>(numvertices);
+				
+				
+				/*Recover vertices ids needed to initialize inputs*/
+				_assert_(iomodel->elements);
+				for(int k=0;k<numvertices;k++){
+					vertexsids[k] =reCast<int>(iomodel->elements[numvertices*element->Sid()+k]); //ids for vertices are in the elements array from Matlab
+					vertexlids[k]=iomodel->my_vertices_lids[vertexsids[k]-1];
+				}
+				//element->GetVerticesLidList(vertexlids);
+				//element->GetVerticesSidList(vertexsids);
+				
+				IssmDouble* values=xNew<IssmDouble>(numvertices);
+
+				for(int t=0;t<N;t++){
+					for (int k=0;k<numvertices;k++)values[k]=str[N*vertexsids[k]+t];
+
+					switch(element->ObjectEnum()){
+						case TriaEnum:  transientinput->AddTriaTimeInput( t,numvertices,vertexlids,values,P1Enum); break;
+						case PentaEnum: transientinput->AddPentaTimeInput(t,numvertices,vertexlids,values,P1Enum); break;
+						default: _error_("Not implemented yet");
+					}
+				}
+				xDelete<IssmDouble>(values);
+				xDelete<int>(vertexlids);
+				xDelete<int>(vertexsids);
+			}
+			xDelete<IssmDouble>(times);
+		}
+		
+		/*Delete data:*/
+		for(int i=0;i<nummodels;i++){
+			IssmDouble* str=pstr[i];
+			xDelete<IssmDouble>(str);
+		}
+		xDelete<IssmDouble*>(pstr);
+		xDelete<int>(pM);
+		xDelete<int>(pN);
+		/*}}}*/
+
+	} /*}}}*/
+	else _error_("Dsl model " << dslmodel << " not implemented yet!");
+			
 	/*Initialize cumdeltalthickness and sealevel rise rate input*/
-	InputUpdateFromConstantx(elements,0.,SealevelriseCumDeltathicknessEnum);
-	InputUpdateFromConstantx(elements,0.,SealevelNEsaRateEnum);
-	InputUpdateFromConstantx(elements,0.,SealevelUEsaRateEnum);
-	InputUpdateFromConstantx(elements,0.,SealevelRSLRateEnum);
-	InputUpdateFromConstantx(elements,0.,SealevelEustaticMaskEnum);
-	InputUpdateFromConstantx(elements,0.,SealevelEustaticOceanMaskEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,SealevelriseCumDeltathicknessEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,SealevelNEsaRateEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,SealevelUEsaRateEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,SealevelRSLRateEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,SealevelEustaticMaskEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,SealevelEustaticOceanMaskEnum);
 
 }/*}}}*/
@@ -70,4 +290,5 @@
 	IssmDouble* love_k=NULL;
 	IssmDouble* love_l=NULL;
+	int         dslmodel=0;
 
 	bool elastic=false;
@@ -91,4 +312,5 @@
 
 	/*some constant parameters: */
+	parameters->AddObject(iomodel->CopyConstantObject("md.dsl.model",DslModelEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.slr.geodetic_run_frequency",SealevelriseGeodeticRunFrequencyEnum));
 	parameters->AddObject(iomodel->CopyConstantObject("md.slr.reltol",SealevelriseReltolEnum));
@@ -109,6 +331,23 @@
 	parameters->AddObject(iomodel->CopyConstantObject("md.slr.geodetic",SealevelriseGeodeticEnum));
 
+	/*Deal with dsl multi-model ensembles: {{{*/
+	iomodel->FetchData(&dslmodel,"md.dsl.model");
+	parameters->AddObject(iomodel->CopyConstantObject("md.dsl.compute_fingerprints",DslComputeFingerprintsEnum));
+	if(dslmodel==2){
+		int modelid; 
+		int nummodels;
+
+		parameters->AddObject(iomodel->CopyConstantObject("md.dsl.modelid",DslModelidEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.dsl.nummodels",DslNummodelsEnum));
+		iomodel->FetchData(&modelid,"md.dsl.modelid");
+		iomodel->FetchData(&nummodels,"md.dsl.nummodels");
+
+		/*quick checks: */
+		if(nummodels<=0)_error_("dslmme object in  md.dsl field should contain at least 1 ensemble model!");
+		if(modelid<=0 || modelid>nummodels)_error_("modelid field in dslmme object of md.dsl field should be between 1 and the number of ensemble runs!");
+	} /*}}}*/
+	/*Deal with elasticity {{{*/
 	iomodel->FetchData(&elastic,"md.slr.elastic");
-	if(elastic){
+	if(elastic){ 
 
 		/*love numbers: */
@@ -229,7 +468,6 @@
 		xDelete<IssmDouble>(H_elastic);
 		xDelete<IssmDouble>(H_elastic_local);
-	}
-
-	/*Transitions: */
+	} /*}}}*/
+	/*Transitions:{{{ */
 	iomodel->FetchData(&transitions,&transitions_M,&transitions_N,&ntransitions,"md.slr.transitions");
 	if(transitions){
@@ -243,10 +481,10 @@
 		xDelete<int>(transitions_M);
 		xDelete<int>(transitions_N);
-	}
-
-	/*Requested outputs*/
+	} /*}}}*/
+	/*Requested outputs {{{*/
 	iomodel->FindConstant(&requestedoutputs,&numoutputs,"md.slr.requested_outputs");
 	if(numoutputs)parameters->AddObject(new StringArrayParam(SealevelriseRequestedOutputsEnum,requestedoutputs,numoutputs));
 	iomodel->DeleteData(&requestedoutputs,numoutputs,"md.slr.requested_outputs");
+	/*}}}*/
 
 }/*}}}*/
Index: /issm/trunk/src/c/analyses/SealevelriseAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/SealevelriseAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/SealevelriseAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/SmbAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/SmbAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/SmbAnalysis.cpp	(revision 24686)
@@ -21,5 +21,5 @@
 	return 1;
 }/*}}}*/
-void SmbAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void SmbAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int    smb_model;
@@ -31,5 +31,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -40,64 +40,64 @@
 	switch(smb_model){
 		case SMBforcingEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.mass_balance",SmbMassBalanceEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.mass_balance",SmbMassBalanceEnum,0.);
 			break;
 		case SMBgembEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.Ta",SmbTaEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.V",SmbVEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dswrf",SmbDswrfEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dlwrf",SmbDlwrfEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.P",SmbPEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.eAir",SmbEAirEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.pAir",SmbPAirEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.zTop",SmbZTopEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dzTop",SmbDzTopEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dzMin",SmbDzMinEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.zY",SmbZYEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.zMax",SmbZMaxEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.zMin",SmbZMinEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Tmean",SmbTmeanEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Vmean",SmbVmeanEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.C",SmbCEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Tz",SmbTzEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Vz",SmbVzEnum);
-			InputUpdateFromConstantx(elements,0.,SmbIsInitializedEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Dzini",SmbDziniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Dini",SmbDiniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Reini",SmbReiniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Gdnini",SmbGdniniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Gspini",SmbGspiniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.ECini",SmbECiniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Wini",SmbWiniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Aini",SmbAiniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Tini",SmbTiniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.Sizeini",SmbSizeiniEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.aValue",SmbAValueEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.teValue",SmbTeValueEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Ta",SmbTaEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.V",SmbVEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dswrf",SmbDswrfEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dlwrf",SmbDlwrfEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.P",SmbPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.eAir",SmbEAirEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.pAir",SmbPAirEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.zTop",SmbZTopEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dzTop",SmbDzTopEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dzMin",SmbDzMinEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.zY",SmbZYEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.zMax",SmbZMaxEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.zMin",SmbZMinEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Tmean",SmbTmeanEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Vmean",SmbVmeanEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.C",SmbCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Tz",SmbTzEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Vz",SmbVzEnum);
+			InputUpdateFromConstantx(inputs2,elements,false,SmbIsInitializedEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Dzini",SmbDziniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Dini",SmbDiniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Reini",SmbReiniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Gdnini",SmbGdniniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Gspini",SmbGspiniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.ECini",SmbECiniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Wini",SmbWiniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Aini",SmbAiniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Tini",SmbTiniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.Sizeini",SmbSizeiniEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.aValue",SmbAValueEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.teValue",SmbTeValueEnum);
 			break;
 		case SMBpddEnum:
 			iomodel->FindConstant(&isdelta18o,"md.smb.isdelta18o");
 			iomodel->FindConstant(&ismungsm,"md.smb.ismungsm");
-			iomodel->FetchDataToInput(elements,"md.thermal.spctemperature",ThermalSpctemperatureEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.s0p",SmbS0pEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.s0t",SmbS0tEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.thermal.spctemperature",ThermalSpctemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.s0p",SmbS0pEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.s0t",SmbS0tEnum);
 			if(isdelta18o || ismungsm){
-				iomodel->FetchDataToInput(elements,"md.smb.temperatures_lgm",SmbTemperaturesLgmEnum);
-				iomodel->FetchDataToInput(elements,"md.smb.temperatures_presentday",SmbTemperaturesPresentdayEnum);
-				iomodel->FetchDataToInput(elements,"md.smb.precipitations_presentday",SmbPrecipitationsPresentdayEnum);
-				iomodel->FetchDataToInput(elements,"md.smb.precipitations_lgm",SmbPrecipitationsLgmEnum);
+				iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.temperatures_lgm",SmbTemperaturesLgmEnum);
+				iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.temperatures_presentday",SmbTemperaturesPresentdayEnum);
+				iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.precipitations_presentday",SmbPrecipitationsPresentdayEnum);
+				iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.precipitations_lgm",SmbPrecipitationsLgmEnum);
 			}else{
-				iomodel->FetchDataToInput(elements,"md.smb.precipitation",SmbPrecipitationEnum);
-				iomodel->FetchDataToInput(elements,"md.smb.monthlytemperatures",SmbMonthlytemperaturesEnum);
+				iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.precipitation",SmbPrecipitationEnum);
+				iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.monthlytemperatures",SmbMonthlytemperaturesEnum);
 			}
 			break;
 		case SMBpddSicopolisEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.s0p",SmbS0pEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.s0t",SmbS0tEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.s0p",SmbS0pEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.s0t",SmbS0tEnum);
 			iomodel->FindConstant(&isfirnwarming,"md.smb.isfirnwarming");
-			iomodel->FetchDataToInput(elements,"md.smb.smb_corr",SmbSmbCorrEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.precipitation",SmbPrecipitationEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.monthlytemperatures",SmbMonthlytemperaturesEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.precipitation_anomaly",SmbPrecipitationsAnomalyEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.temperature_anomaly",SmbTemperaturesAnomalyEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.smb_corr",SmbSmbCorrEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.precipitation_anomaly",SmbPrecipitationsAnomalyEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.temperature_anomaly",SmbTemperaturesAnomalyEnum);
+			iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.monthlytemperatures",SmbMonthlytemperaturesEnum);
+			iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.precipitation",SmbPrecipitationEnum);
 			break;
 		case SMBd18opddEnum:
@@ -107,67 +107,94 @@
 			iomodel->FindConstant(&isd18opd,"md.smb.isd18opd");
 			iomodel->FindConstant(&issetpddfac,"md.smb.issetpddfac");
-			iomodel->FetchDataToInput(elements,"md.thermal.spctemperature",ThermalSpctemperatureEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.s0p",SmbS0pEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.s0t",SmbS0tEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.thermal.spctemperature",ThermalSpctemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.s0p",SmbS0pEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.s0t",SmbS0tEnum);
 			if(isd18opd){
-				iomodel->FetchDataToInput(elements,"md.smb.temperatures_presentday",SmbTemperaturesPresentdayEnum);
-				iomodel->FetchDataToInput(elements,"md.smb.precipitations_presentday",SmbPrecipitationsPresentdayEnum);
+				iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.temperatures_presentday",SmbTemperaturesPresentdayEnum);
+				iomodel->FetchDataToDatasetInput(inputs2,elements,"md.smb.precipitations_presentday",SmbPrecipitationsPresentdayEnum);
 				if(!istemperaturescaled){
-					iomodel->FetchDataToInput(elements,"md.smb.temperatures_reconstructed",SmbTemperaturesReconstructedEnum);
+					/*Fetch array*/
+					IssmDouble* doublearray = NULL;
+					int         M,N;
+					iomodel->FetchData(&doublearray,&M,&N,"md.smb.temperatures_reconstructed");
+					if(M!=iomodel->numberofvertices+1) _error_("md.smb.temperatures_reconstructed should have nbv+1 rows");
+					if(N%12!=0) _error_("md.smb.temperatures_reconstructed should have a multiple of 12 columns (since it is monthly)");
+
+					/*Build indices*/
+					int* ids = xNew<int>(N); for(int i=0;i<N;i++) ids[i] = i;
+
+					for(int i=0;i<elements->Size();i++){
+						Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
+						element->DatasetInputCreate(doublearray,M-1,N,ids,N,inputs2,iomodel,SmbTemperaturesReconstructedEnum);
+					}
+					xDelete<int>(ids);
+					iomodel->DeleteData(doublearray,"md.smb.temperatures_reconstructed");
 				}
 				if(!isprecipscaled){
-					iomodel->FetchDataToInput(elements,"md.smb.precipitations_reconstructed",SmbPrecipitationsReconstructedEnum);
+					/*Fetch array*/
+					IssmDouble* doublearray = NULL;
+					int         M,N;
+					iomodel->FetchData(&doublearray,&M,&N,"md.smb.precipitations_reconstructed");
+					if(M!=iomodel->numberofvertices+1) _error_("md.smb.precipitations_reconstructed should have nbv+1 rows");
+					if(N%12!=0) _error_("md.smb.precipitations_reconstructed should have a multiple of 12 columns (since it is monthly)");
+
+					/*Build indices*/
+					int* ids = xNew<int>(N); for(int i=0;i<N;i++) ids[i] = i;
+
+					for(int i=0;i<elements->Size();i++){
+						Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
+						element->DatasetInputCreate(doublearray,M-1,N,ids,N,inputs2,iomodel,SmbPrecipitationsReconstructedEnum);
+					}
+					xDelete<int>(ids);
+					iomodel->DeleteData(doublearray,"md.smb.precipitations_reconstructed");
 				}
 			}
 			if(issetpddfac){
-				iomodel->FetchDataToInput(elements,"md.smb.pddfac_snow",SmbPddfacSnowEnum,-1.);
-				iomodel->FetchDataToInput(elements,"md.smb.pddfac_ice",SmbPddfacIceEnum,-1.);
+				iomodel->FetchDataToInput(inputs2,elements,"md.smb.pddfac_snow",SmbPddfacSnowEnum,-1.);
+				iomodel->FetchDataToInput(inputs2,elements,"md.smb.pddfac_ice",SmbPddfacIceEnum,-1.);
 			}
 			break;
 		case SMBgradientsEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.href",SmbHrefEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.smbref",SmbSmbrefEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.b_pos",SmbBPosEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.b_neg",SmbBNegEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.href",SmbHrefEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.smbref",SmbSmbrefEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.b_pos",SmbBPosEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.b_neg",SmbBNegEnum);
 			break;
 		case SMBgradientselaEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.ela",SmbElaEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.b_pos",SmbBPosEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.b_neg",SmbBNegEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.b_max",SmbBMaxEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.b_min",SmbBMinEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.ela",SmbElaEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.b_pos",SmbBPosEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.b_neg",SmbBNegEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.b_max",SmbBMaxEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.b_min",SmbBMinEnum);
 			break;
 		case SMBhenningEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.smbref",SmbSmbrefEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.smbref",SmbSmbrefEnum,0.);
 			break;
 		case SMBcomponentsEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.accumulation",SmbAccumulationEnum,0.);
-			iomodel->FetchDataToInput(elements,"md.smb.evaporation",SmbEvaporationEnum,0.);
-			iomodel->FetchDataToInput(elements,"md.smb.runoff",SmbRunoffEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.accumulation",SmbAccumulationEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.evaporation",SmbEvaporationEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.runoff",SmbRunoffEnum,0.);
 			break;
 		case SMBmeltcomponentsEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.accumulation",SmbAccumulationEnum,0.);
-			iomodel->FetchDataToInput(elements,"md.smb.evaporation",SmbEvaporationEnum,0.);
-			iomodel->FetchDataToInput(elements,"md.smb.melt",SmbMeltEnum,0.);
-			iomodel->FetchDataToInput(elements,"md.smb.refreeze",SmbRefreezeEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.accumulation",SmbAccumulationEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.evaporation",SmbEvaporationEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.melt",SmbMeltEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.refreeze",SmbRefreezeEnum,0.);
 			break;
 		case SMBgradientscomponentsEnum:
-			iomodel->FetchDataToInput(elements,"md.smb.accualti",SmbAccualtiEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.accugrad",SmbAccugradEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.runoffalti",SmbRunoffaltiEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.runoffgrad",SmbRunoffgradEnum);
+			/* Nothing to add to input */
 			break;
 		case SMBsemicEnum:
-			iomodel->FetchDataToInput(elements,"md.thermal.spctemperature",ThermalSpctemperatureEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.s0gcm",SmbS0gcmEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailysnowfall",SmbDailysnowfallEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailyrainfall",SmbDailyrainfallEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailydsradiation",SmbDailydsradiationEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailydlradiation",SmbDailydlradiationEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailywindspeed",SmbDailywindspeedEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailypressure",SmbDailypressureEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailyairdensity",SmbDailyairdensityEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailyairhumidity",SmbDailyairhumidityEnum);
-			iomodel->FetchDataToInput(elements,"md.smb.dailytemperature",SmbDailytemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.thermal.spctemperature",ThermalSpctemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.s0gcm",SmbS0gcmEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailysnowfall",SmbDailysnowfallEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailyrainfall",SmbDailyrainfallEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailydsradiation",SmbDailydsradiationEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailydlradiation",SmbDailydlradiationEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailywindspeed",SmbDailywindspeedEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailypressure",SmbDailypressureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailyairdensity",SmbDailyairdensityEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailyairhumidity",SmbDailyairhumidityEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.smb.dailytemperature",SmbDailytemperatureEnum);
 			break;
 		default:
@@ -251,4 +278,5 @@
 				iomodel->DeleteData(temp,"md.smb.delta18o_surface");
 			}
+
 			break;
 		case SMBpddSicopolisEnum:
@@ -269,5 +297,62 @@
 				parameters->AddObject(new TransientParam(SmbDelta18oEnum,&temp[0],&temp[M],interp,M));
 				iomodel->DeleteData(temp,"md.smb.delta18o");
-			}
+
+				IssmDouble yts;
+				bool istemperaturescaled,isprecipscaled;
+				iomodel->FindConstant(&yts,"md.constants.yts");
+				iomodel->FindConstant(&istemperaturescaled,"md.smb.istemperaturescaled");
+				iomodel->FindConstant(&isprecipscaled,"md.smb.isprecipscaled");
+				if(!istemperaturescaled){
+					/*Fetch array*/
+					IssmDouble* doublearray = NULL;
+					int         M,N;
+					iomodel->FetchData(&doublearray,&M,&N,"md.smb.temperatures_reconstructed");
+					if(M!=iomodel->numberofvertices+1) _error_("md.smb.temperatures_reconstructed should have nbv+1 rows");
+					if(N%12!=0) _error_("md.smb.temperatures_reconstructed should have a multiple of 12 columns (since it is monthly)");
+					int numyears = N/12; _assert_(numyears*12==N);
+
+					/*Check times*/
+					#ifdef _ISSM_DEBUG_
+					for(int i=0;i<numyears;i++){
+						for(int j=1;j<12;j++){
+							//_assert_(floor(doublearray[(M-1)*N+i*12+j]/yts)==floor(doublearray[(M-1)*N+i*12]/yts));
+							_assert_(doublearray[(M-1)*N+i*12+j]>doublearray[(M-1)*N+i*12+j-1]);
+						}
+					}
+					#endif
+
+					/*Build time*/
+					IssmDouble* times = xNew<IssmDouble>(numyears); for(int i=0;i<numyears;i++) times[i] = doublearray[(M-1)*N+i*12];
+					parameters->AddObject(new DoubleVecParam(SmbTemperaturesReconstructedYearsEnum,times,numyears));
+					xDelete<IssmDouble>(times);
+					iomodel->DeleteData(doublearray,"md.smb.temperatures_reconstructed");
+				}
+				if(!isprecipscaled){
+					/*Fetch array*/
+					IssmDouble* doublearray = NULL;
+					int         M,N;
+					iomodel->FetchData(&doublearray,&M,&N,"md.smb.precipitations_reconstructed");
+					if(M!=iomodel->numberofvertices+1) _error_("md.smb.precipitations_reconstructed should have nbv+1 rows");
+					if(N%12!=0) _error_("md.smb.precipitations_reconstructed should have a multiple of 12 columns (since it is monthly)");
+					int numyears = N/12; _assert_(numyears*12==N);
+
+					/*Check times*/
+					#ifdef _ISSM_DEBUG_
+					for(int i=0;i<numyears;i++){
+						for(int j=1;j<12;j++){
+							//_assert_(floor(doublearray[(M-1)*N+i*12+j]/yts)==floor(doublearray[(M-1)*N+i*12]/yts));
+							_assert_(doublearray[(M-1)*N+i*12+j]>doublearray[(M-1)*N+i*12+j-1]);
+						}
+					}
+					#endif
+
+					/*Build time*/
+					IssmDouble* times = xNew<IssmDouble>(numyears); for(int i=0;i<numyears;i++) times[i] = doublearray[(M-1)*N+i*12];
+					parameters->AddObject(new DoubleVecParam(SmbPrecipitationsReconstructedYearsEnum,times,numyears));
+					xDelete<IssmDouble>(times);
+					iomodel->DeleteData(doublearray,"md.smb.precipitations_reconstructed");
+				}
+			}
+
 			break;
 		case SMBgradientsEnum:
Index: /issm/trunk/src/c/analyses/SmbAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/SmbAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/SmbAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/SmoothAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/SmoothAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/SmoothAnalysis.cpp	(revision 24686)
@@ -18,5 +18,5 @@
 	return 1;
 }/*}}}*/
-void SmoothAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void SmoothAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Update elements: */
@@ -25,5 +25,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
@@ -70,5 +70,5 @@
 	element->FindParam(&l,SmoothThicknessMultiplierEnum); _assert_(l>0.);
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* thickness_input = element->GetInput(ThicknessEnum); _assert_(thickness_input);
+	Input2* thickness_input = element->GetInput2(ThicknessEnum); _assert_(thickness_input);
 
 	/* Start looping on the number of gaussian points: */
@@ -116,8 +116,8 @@
 	IssmDouble  Jdet,value;
 	IssmDouble *xyz_list  = NULL;
-	Input      *input = NULL;
+	Input2     *input = NULL;
 
 	/*SPECIFICS: Driving stress for balance velocities*/
-	Input*      H_input = NULL, *surface_input = NULL, *vx_input = NULL, *vy_input = NULL;
+	Input2*      H_input = NULL, *surface_input = NULL, *vx_input = NULL, *vy_input = NULL;
 	IssmDouble  taud_x,norms,normv,vx,vy;
 	IssmDouble  rho_ice,gravity,slope[2],thickness;
@@ -139,16 +139,16 @@
 			rho_ice       = element->FindParam(MaterialsRhoIceEnum);
 			gravity       = element->FindParam(ConstantsGEnum);
-			H_input       = element->GetInput(ThicknessEnum); _assert_(H_input);
-			surface_input = element->GetInput(SurfaceEnum);   _assert_(surface_input);
-			vx_input      = element->GetInput(VxEnum);
-			vy_input      = element->GetInput(VyEnum);
+			H_input       = element->GetInput2(ThicknessEnum); _assert_(H_input);
+			surface_input = element->GetInput2(SurfaceEnum);   _assert_(surface_input);
+			vx_input      = element->GetInput2(VxEnum);
+			vy_input      = element->GetInput2(VyEnum);
 			}
 			break;
 		case SurfaceSlopeXEnum:
 		case SurfaceSlopeYEnum:{
-			surface_input = element->GetInput(SurfaceEnum);   _assert_(surface_input);
-			}
-			break;
-		default: input = element->GetInput(input_enum);
+			surface_input = element->GetInput2(SurfaceEnum);   _assert_(surface_input);
+			}
+			break;
+		default: input = element->GetInput2(input_enum);
 	}
 
Index: /issm/trunk/src/c/analyses/SmoothAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/SmoothAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/SmoothAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/StressbalanceAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/StressbalanceAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/StressbalanceAnalysis.cpp	(revision 24686)
@@ -1,8 +1,19 @@
 #include "./StressbalanceAnalysis.h"
 #include "../toolkits/toolkits.h"
-#include "../classes/classes.h"
-#include "../shared/shared.h"
-#include "../modules/modules.h"
 #include "../solutionsequences/solutionsequences.h"
+#include "../classes/IoModel.h"
+#include "../classes/FemModel.h"
+#include "../classes/Constraints/Constraints.h"
+#include "../classes/Constraints/Constraint.h"
+#include "../classes/Constraints/SpcStatic.h"
+#include "../classes/Params/Parameters.h"
+#include "../classes/Nodes.h"
+#include "../classes/Node.h"
+#include "../classes/Elements/Elements.h"
+#include "../classes/Elements/Element.h"
+#include "../modules/ModelProcessorx/ModelProcessorx.h"
+#include "../modules/IoModelToConstraintsx/IoModelToConstraintsx.h"
+#include "../modules/InputUpdateFromConstantx/InputUpdateFromConstantx.h"
+#include "../modules/SetActiveNodesLSMx/SetActiveNodesLSMx.h"
 #include "../cores/cores.h"
 
@@ -659,5 +670,5 @@
 	return numdofs;
 }/*}}}*/
-void StressbalanceAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void StressbalanceAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Intermediaries*/
@@ -730,5 +741,11 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement_list[i]);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement_list[i]);
+
+			/*Need to know the type of approximation for this element*/
+			if(iomodel->Data("md.flowequation.element_equation")){
+				inputs2->SetInput(ApproximationEnum,counter,IoCodeToEnumElementEquation(reCast<int>(iomodel->Data("md.flowequation.element_equation")[i])));
+			}
+
 			counter++;
 		}
@@ -736,29 +753,29 @@
 
 	/*Create inputs: */
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum,0.);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum,0.);
-	iomodel->FetchDataToInput(elements,"md.stressbalance.loadingforcex",LoadingforceXEnum);
-	iomodel->FetchDataToInput(elements,"md.stressbalance.loadingforcey",LoadingforceYEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum,0.);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum,0.);
+	iomodel->FetchDataToInput(inputs2,elements,"md.stressbalance.loadingforcex",LoadingforceXEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.stressbalance.loadingforcey",LoadingforceYEnum);
 	#ifdef LATERALFRICTION
-	iomodel->FetchDataToInput(elements,"md.mesh.vertexonboundary",MeshVertexonboundaryEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonboundary",MeshVertexonboundaryEnum);
 	#endif
 
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
 	}
 	if(iomodel->domaintype==Domain3DEnum){
-		iomodel->FetchDataToInput(elements,"md.flowequation.borderFS",FlowequationBorderFSEnum);
-		iomodel->FetchDataToInput(elements,"md.stressbalance.loadingforcez",LoadingforceZEnum);
-		iomodel->FetchDataToInput(elements,"md.initialization.vz",VzEnum,0.);
+		iomodel->FetchDataToInput(inputs2,elements,"md.flowequation.borderFS",FlowequationBorderFSEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.stressbalance.loadingforcez",LoadingforceZEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vz",VzEnum,0.);
 	}
 	if(isFS){
-		iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum,0.);
+		iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum,0.);
 
 		/*Add basal forcings to compute melt rate*/
@@ -767,5 +784,5 @@
 		switch(basalforcing_model){
 			case FloatingMeltRateEnum:
-				iomodel->FetchDataToInput(elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
 				break;
 			case LinearFloatingMeltRateEnum:
@@ -776,17 +793,18 @@
 				break;
 			case SpatialLinearFloatingMeltRateEnum:
-				iomodel->FetchDataToInput(elements,"md.basalforcings.deepwater_melting_rate",BasalforcingsDeepwaterMeltingRateEnum);
-				iomodel->FetchDataToInput(elements,"md.basalforcings.deepwater_elevation",BasalforcingsDeepwaterElevationEnum);
-				iomodel->FetchDataToInput(elements,"md.basalforcings.upperwater_elevation",BasalforcingsUpperwaterElevationEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.deepwater_melting_rate",BasalforcingsDeepwaterMeltingRateEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.deepwater_elevation",BasalforcingsDeepwaterElevationEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.upperwater_elevation",BasalforcingsUpperwaterElevationEnum);
 				break;
 			case BasalforcingsPicoEnum:
-				iomodel->FetchDataToInput(elements,"md.basalforcings.basin_id",BasalforcingsPicoBasinIdEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.basin_id",BasalforcingsPicoBasinIdEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.overturning_coeff",BasalforcingsPicoOverturningCoeffEnum);
 				break;
 			case BasalforcingsIsmip6Enum:
-				iomodel->FetchDataToInput(elements,"md.basalforcings.basin_id",BasalforcingsIsmip6BasinIdEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.basin_id",BasalforcingsIsmip6BasinIdEnum);
 				break;
 			case BeckmannGoosseFloatingMeltRateEnum:
-				iomodel->FetchDataToInput(elements,"md.basalforcings.ocean_salinity",BasalforcingsOceanSalinityEnum);
-				iomodel->FetchDataToInput(elements,"md.basalforcings.ocean_temp",BasalforcingsOceanTempEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.ocean_salinity",BasalforcingsOceanSalinityEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.ocean_temp",BasalforcingsOceanTempEnum);
 				break;
 			default:
@@ -797,5 +815,5 @@
 	iomodel->FindConstant(&fe_FS,"md.flowequation.fe_FS");
 	if(fe_FS==LATaylorHoodEnum || fe_FS==LACrouzeixRaviartEnum){
-		InputUpdateFromConstantx(elements,0.,SigmaNNEnum);
+		InputUpdateFromConstantx(inputs2,elements,0.,SigmaNNEnum);
 	}
 
@@ -804,83 +822,83 @@
 		case 1:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 			}
 			break;
 		case 2:
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
 			break;
 		case 3:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.As",FrictionAsEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.As",FrictionAsEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 			}
 			break;
 		case 4:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
 			break;
 		case 5:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.water_layer",FrictionWaterLayerEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.water_layer",FrictionWaterLayerEnum);
 			break;
 		case 6:
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
 			break;
 		case 7:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if(FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 
 			}
 			break;
 		case 9:
-			iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
-			InputUpdateFromConstantx(elements,1.,FrictionPEnum);
-			InputUpdateFromConstantx(elements,1.,FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
+			InputUpdateFromConstantx(inputs2,elements,1.,FrictionPEnum);
+			InputUpdateFromConstantx(inputs2,elements,1.,FrictionQEnum);
 			break;
 		case 10:
-			iomodel->FetchDataToInput(elements,"md.friction.till_friction_angle",FrictionTillFrictionAngleEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.sediment_compressibility_coefficient",FrictionSedimentCompressibilityCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.hydrology.watercolumn_max",HydrologyWatercolumnMaxEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.watercolumn",WatercolumnEnum,0.);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.till_friction_angle",FrictionTillFrictionAngleEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.sediment_compressibility_coefficient",FrictionSedimentCompressibilityCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.hydrology.watercolumn_max",HydrologyWatercolumnMaxEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.watercolumn",WatercolumnEnum,0.);
 			break;
 		case 11:
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.Cmax",FrictionCmaxEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.Cmax",FrictionCmaxEnum);
 			break;
 		case 12:
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.f",FrictionfEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.f",FrictionfEnum);
 			break;
 		default:
@@ -889,5 +907,5 @@
 
 #ifdef _HAVE_ANDROID_
-	elements->InputDuplicate(FrictionCoefficientEnum,AndroidFrictionCoefficientEnum);
+	inputs2->DuplicateInput(FrictionCoefficientEnum,AndroidFrictionCoefficientEnum);
 #endif
 
@@ -948,4 +966,5 @@
 		case 1:
 			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
 			break;
 		case 2:
@@ -953,8 +972,10 @@
 		case 3:
 			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));			
 			break;
 		case 4:
 			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
 			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));			
 			break;
 		case 5:
@@ -966,4 +987,5 @@
 		case 7:
 			parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
 			break;
 		case 8:
@@ -971,4 +993,5 @@
 		case 9:
 			parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));			
 			break;
 		case 10:
@@ -980,7 +1003,9 @@
 		case 11:
 			parameters->AddObject(new IntParam(FrictionCouplingEnum,2));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));			
 			break;
 		case 12:
 			parameters->AddObject(new IntParam(FrictionCouplingEnum,2));
+			parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));			
 			break;
 		default: _error_("Friction law "<<frictionlaw<<" not implemented yet");
@@ -1059,5 +1084,5 @@
 
 	int approximation;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	switch(approximation){
 		case FSApproximationEnum:
@@ -1072,5 +1097,5 @@
 
 	int approximation;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	switch(approximation){
 		case SSAApproximationEnum:
@@ -1088,5 +1113,5 @@
 ElementMatrix* StressbalanceAnalysis::CreateKMatrix(Element* element){/*{{{*/
 	int approximation;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	switch(approximation){
 		case SIAApproximationEnum:
@@ -1115,5 +1140,5 @@
 
 	int approximation;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	switch(approximation){
 		case SIAApproximationEnum:
@@ -1142,5 +1167,5 @@
 
 	int approximation;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	switch(approximation){
 		case FSApproximationEnum: case NoneApproximationEnum:
@@ -1178,5 +1203,5 @@
 	int numnodes = element->GetNumberOfNodes();
 	int numdof   = numnodes*dofpernode;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 
 	/*Fetch dof list and allocate solution vector*/
@@ -1185,7 +1210,7 @@
 
 	/*Get inputs*/
-	Input* vx_input=element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=NULL;
-	if(domaintype!=Domain2DverticalEnum){vy_input=element->GetInput(VyEnum); _assert_(vy_input);}
+	Input2* vx_input=element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=NULL;
+	if(domaintype!=Domain2DverticalEnum){vy_input=element->GetInput2(VyEnum); _assert_(vy_input);}
 
 	/*Ok, we have vx and vy in values, fill in vx and vy arrays: */
@@ -1216,5 +1241,5 @@
 
 	int approximation;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	switch(approximation){
 		case FSApproximationEnum: case NoneApproximationEnum:
@@ -1287,7 +1312,7 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* thickness_input = basalelement->GetInput(ThicknessEnum);_assert_(thickness_input);
-	Input* vx_input        = basalelement->GetInput(VxEnum);       _assert_(vx_input);
-	Input* vy_input        = basalelement->GetInput(VyEnum);       _assert_(vy_input);
+	Input2* thickness_input = basalelement->GetInput2(ThicknessEnum);_assert_(thickness_input);
+	Input2* vx_input        = basalelement->GetInput2(VxEnum);       _assert_(vx_input);
+	Input2* vy_input        = basalelement->GetInput2(VyEnum);       _assert_(vy_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1406,6 +1431,6 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&friction_style,GroundinglineFrictionInterpolationEnum);
-	Input* surface_input    = element->GetInput(SurfaceEnum); _assert_(surface_input);
-	Input* gllevelset_input = NULL;
+	Input2* surface_input    = element->GetInput2(SurfaceEnum); _assert_(surface_input);
+	Input2* gllevelset_input = NULL;
 
 	/*build friction object, used later on: */
@@ -1415,5 +1440,5 @@
 	if(!(friction_style==SubelementFriction2Enum)) phi=element->GetGroundedPortion(xyz_list);
 	if(friction_style==SubelementFriction2Enum){
-		gllevelset_input=element->GetInput(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
+		gllevelset_input=element->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
 		element->GetGroundedPart(&point1,&fraction1,&fraction2,&mainlyfloating);
 	   gauss = element->NewGauss(point1,fraction1,fraction2,mainlyfloating,2);
@@ -1501,5 +1526,5 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GetLevelCoordinates(&xyz_list_boundary,xyz_list,MeshVertexonboundaryEnum,1.);
-	Input* icelevelset_input = element->GetInput(MaskIceLevelsetEnum); _assert_(icelevelset_input);
+	Input2* icelevelset_input = element->GetInput2(MaskIceLevelsetEnum); _assert_(icelevelset_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -1563,9 +1588,9 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* thickness_input=element->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* vx_input=element->GetInput(VxEnum);               _assert_(vx_input);
-	Input* vy_input    = NULL;
+	Input2* thickness_input=element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* vx_input=element->GetInput2(VxEnum);               _assert_(vx_input);
+	Input2* vy_input    = NULL;
 	if(dim==2){
-		vy_input    = element->GetInput(VyEnum);       _assert_(vy_input);
+		vy_input    = element->GetInput2(VyEnum);       _assert_(vy_input);
 	}
 
@@ -1680,6 +1705,6 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input*     thickness_input=element->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input*     surface_input  =element->GetInput(SurfaceEnum);   _assert_(surface_input);
+	Input2*     thickness_input=element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2*     surface_input  =element->GetInput2(SurfaceEnum);   _assert_(surface_input);
 	IssmDouble rhog = element->FindParam(MaterialsRhoIceEnum)*element->FindParam(ConstantsGEnum);
 
@@ -1744,7 +1769,7 @@
 
 	/*Retrieve all inputs and parameters*/
-	Input* thickness_input = element->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* base_input       = element->GetInput(BaseEnum);       _assert_(base_input);
-	Input* sealevel_input       = element->GetInput(SealevelEnum);       _assert_(sealevel_input);
+	Input2* thickness_input = element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* base_input       = element->GetInput2(BaseEnum);       _assert_(base_input);
+	Input2* sealevel_input       = element->GetInput2(SealevelEnum);       _assert_(sealevel_input);
 	IssmDouble rho_water   = element->FindParam(MaterialsRhoSeawaterEnum);
 	IssmDouble rho_ice     = element->FindParam(MaterialsRhoIceEnum);
@@ -1941,5 +1966,5 @@
 		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
 	}
-	element->AddInput(PressureEnum,pressure,P1Enum);
+	element->AddInput2(PressureEnum,pressure,P1Enum);
 	xDelete<IssmDouble>(pressure);
 	xDelete<IssmDouble>(thickness);
@@ -1964,9 +1989,9 @@
 	/*Fetch dof list and allocate solution vectors*/
 	basalelement->GetDofListLocal(&doflist,SSAApproximationEnum,GsetEnum);
-	IssmDouble* values    = xNew<IssmDouble>(numdof);
-	IssmDouble* vx        = xNew<IssmDouble>(numnodes);
-	IssmDouble* vy        = xNew<IssmDouble>(numnodes);
-	IssmDouble* vz        = xNew<IssmDouble>(numnodes);
-	IssmDouble* vel       = xNew<IssmDouble>(numnodes);
+	IssmDouble* values = xNew<IssmDouble>(numdof);
+	IssmDouble* vx     = xNew<IssmDouble>(numnodes);
+	IssmDouble* vy     = xNew<IssmDouble>(numnodes);
+	IssmDouble* vz     = xNew<IssmDouble>(numnodes);
+	IssmDouble* vel    = xNew<IssmDouble>(numnodes);
 
 	/*Use the dof list to index into the solution vector: */
@@ -2000,7 +2025,7 @@
 
 	/*Add vx and vy as inputs to the tria element: */
-	element->AddBasalInput(VxEnum,vx,element->GetElementType());
-	if(dim==2)element->AddBasalInput(VyEnum,vy,element->GetElementType());
-	element->AddBasalInput(VelEnum,vel,element->GetElementType());
+	element->AddBasalInput2(VxEnum,vx,element->GetElementType());
+	if(dim==2)element->AddBasalInput2(VyEnum,vy,element->GetElementType());
+	element->AddBasalInput2(VelEnum,vel,element->GetElementType());
 
 	/*Free ressources:*/
@@ -2062,7 +2087,7 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* surface_input = element->GetInput(SurfaceEnum); _assert_(surface_input);
-	Input* vx_input      = element->GetInput(VxEnum);      _assert_(vx_input);
-	Input* vy_input      = element->GetInput(VyEnum);      _assert_(vy_input);
+	Input2* surface_input = element->GetInput2(SurfaceEnum); _assert_(surface_input);
+	Input2* vx_input      = element->GetInput2(VxEnum);      _assert_(vx_input);
+	Input2* vy_input      = element->GetInput2(VyEnum);      _assert_(vy_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -2149,6 +2174,6 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input*     thickness_input=element->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input*     surface_input  =element->GetInput(SurfaceEnum);   _assert_(surface_input);
+	Input2*     thickness_input=element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2*     surface_input  =element->GetInput2(SurfaceEnum);   _assert_(surface_input);
 	IssmDouble rhog = element->FindParam(MaterialsRhoIceEnum)*element->FindParam(ConstantsGEnum);
 
@@ -2200,7 +2225,7 @@
 
 	/*Retrieve all inputs and parameters*/
-	Input* thickness_input = element->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* base_input       = element->GetInput(BaseEnum);       _assert_(base_input);
-	Input* sealevel_input       = element->GetInput(SealevelEnum);       _assert_(sealevel_input);
+	Input2* thickness_input = element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* base_input       = element->GetInput2(BaseEnum);       _assert_(base_input);
+	Input2* sealevel_input       = element->GetInput2(SealevelEnum);       _assert_(sealevel_input);
 	IssmDouble rho_water   = element->FindParam(MaterialsRhoSeawaterEnum);
 	IssmDouble rho_ice     = element->FindParam(MaterialsRhoIceEnum);
@@ -2270,5 +2295,5 @@
 		for(i=0;i<numvertices;i++) pressure[i]=rho_ice*g*(surface[i]-xyz_list[i*3+2]);
 	}
-	element->AddInput(PressureEnum,pressure,P1Enum);
+	element->AddInput2(PressureEnum,pressure,P1Enum);
 	xDelete<IssmDouble>(pressure);
 	xDelete<IssmDouble>(thickness);
@@ -2323,7 +2348,7 @@
 
 	/*Add vx and vy as inputs to the tria element: */
-	element->AddBasalInput(VxEnum,vx,element->GetElementType());
-	element->AddBasalInput(VyEnum,vy,element->GetElementType());
-	element->AddBasalInput(VelEnum,vel,element->GetElementType());
+	element->AddBasalInput2(VxEnum,vx,element->GetElementType());
+	element->AddBasalInput2(VyEnum,vy,element->GetElementType());
+	element->AddBasalInput2(VelEnum,vel,element->GetElementType());
 
 	/*Free ressources:*/
@@ -2359,6 +2384,6 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input = element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input = element->GetInput(VyEnum); _assert_(vy_input);
+	Input2* vx_input = element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = element->GetInput2(VyEnum); _assert_(vy_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -2445,5 +2470,5 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->FindParam(&friction_style,GroundinglineFrictionInterpolationEnum);
-	Input* gllevelset_input = NULL;
+	Input2* gllevelset_input = NULL;
 
 	/*build friction object, used later on: */
@@ -2453,5 +2478,5 @@
 	if(!(friction_style==SubelementFriction2Enum)) phi=element->GetGroundedPortion(xyz_list_base);
 	if(friction_style==SubelementFriction2Enum){
-		gllevelset_input=element->GetInput(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
+		gllevelset_input=element->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
 		element->GetGroundedPart(&point1,&fraction1,&fraction2,&mainlyfloating);
 		gauss = element->NewGauss(point1,fraction1,fraction2,mainlyfloating,2);
@@ -2513,5 +2538,5 @@
 	/*Intermediaries*/
 	int         dim,bsize;
-	IssmDouble  viscosity,thickness,Jdet;
+	IssmDouble  viscosity,Jdet;
 	IssmDouble *xyz_list = NULL;
 
@@ -2529,8 +2554,8 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input    = element->GetInput(VxEnum);       _assert_(vx_input);
-	Input* vy_input    = NULL;
+	Input2* vx_input    = element->GetInput2(VxEnum);       _assert_(vx_input);
+	Input2* vy_input    = NULL;
 	if(dim==3){
-		vy_input=element->GetInput(VyEnum);          _assert_(vy_input);
+		vy_input=element->GetInput2(VyEnum);          _assert_(vy_input);
 	}
 
@@ -2677,5 +2702,5 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input*     surface_input = element->GetInput(SurfaceEnum);   _assert_(surface_input);
+	Input2*     surface_input = element->GetInput2(SurfaceEnum);   _assert_(surface_input);
 	IssmDouble rhog = element->FindParam(MaterialsRhoIceEnum)*element->FindParam(ConstantsGEnum);
 
@@ -2733,6 +2758,6 @@
 
 	/*Retrieve all inputs and parameters*/
-	Input* surface_input = element->GetInput(SurfaceEnum); _assert_(surface_input);
-	Input* sealevel_input       = element->GetInput(SealevelEnum);       _assert_(sealevel_input);
+	Input2* surface_input = element->GetInput2(SurfaceEnum); _assert_(surface_input);
+	Input2* sealevel_input       = element->GetInput2(SealevelEnum);       _assert_(sealevel_input);
 	IssmDouble rho_water = element->FindParam(MaterialsRhoSeawaterEnum);
 	IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
@@ -2938,5 +2963,5 @@
 		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
 	}
-	element->AddInput(PressureEnum,pressure,P1Enum);
+	element->AddInput2(PressureEnum,pressure,P1Enum);
 	xDelete<IssmDouble>(pressure);
 	xDelete<IssmDouble>(surface);
@@ -2983,7 +3008,7 @@
 
 	/*Add vx and vy as inputs to the element: */
-	element->AddInput(VxEnum,vx,element->GetElementType());
-	if(dim==3)element->AddInput(VyEnum,vy,element->GetElementType());
-	element->AddInput(VelEnum,vel,element->GetElementType());
+	element->AddInput2(VxEnum,vx,element->GetElementType());
+	if(dim==3)element->AddInput2(VyEnum,vy,element->GetElementType());
+	element->AddInput2(VelEnum,vel,element->GetElementType());
 
 	/*Free ressources:*/
@@ -3051,7 +3076,7 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input = element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input = element->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input = element->GetInput(VzEnum); _assert_(vz_input);
+	Input2* vx_input = element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = element->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input = element->GetInput2(VzEnum); _assert_(vz_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -3141,5 +3166,5 @@
 	/*If on not water or not FS, skip stiffness: */
 	int approximation,shelf_dampening;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	if(approximation!=FSApproximationEnum && approximation!=SSAFSApproximationEnum && approximation!=HOFSApproximationEnum) return NULL;
 	element->FindParam(&shelf_dampening,StressbalanceShelfDampeningEnum);
@@ -3174,5 +3199,5 @@
 	IssmDouble  rho_water     = element->FindParam(MaterialsRhoSeawaterEnum);
 	IssmDouble  gravity       = element->FindParam(ConstantsGEnum);
-	Input*      base_input = element->GetInput(BaseEnum); _assert_(base_input);
+	Input2*      base_input = element->GetInput2(BaseEnum); _assert_(base_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -3232,8 +3257,8 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input=element->GetInput(VxEnum);     _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum);     _assert_(vy_input);
-	Input* vz_input = NULL;
-	if(dim==3){vz_input=element->GetInput(VzEnum); _assert_(vz_input);}
+	Input2* vx_input=element->GetInput2(VxEnum);     _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum);     _assert_(vy_input);
+	Input2* vz_input = NULL;
+	if(dim==3){vz_input=element->GetInput2(VzEnum); _assert_(vz_input);}
 
 	/* Start  looping on the number of gaussian points: */
@@ -3382,8 +3407,8 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	//element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input=element->GetInput(VxEnum);     _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum);     _assert_(vy_input);
-	Input* vz_input = NULL;
-	if(dim==3){vz_input=element->GetInput(VzEnum); _assert_(vz_input);}
+	Input2* vx_input=element->GetInput2(VxEnum);     _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum);     _assert_(vy_input);
+	Input2* vz_input = NULL;
+	if(dim==3){vz_input=element->GetInput2(VzEnum); _assert_(vz_input);}
 
 
@@ -3456,8 +3481,8 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input = element->GetInput(VxEnum);     _assert_(vx_input);
-	Input* vy_input = element->GetInput(VyEnum);     _assert_(vy_input);
-	Input* vz_input = NULL;
-	if(dim==3){vz_input = element->GetInput(VzEnum); _assert_(vz_input);}
+	Input2* vx_input = element->GetInput2(VxEnum);     _assert_(vx_input);
+	Input2* vy_input = element->GetInput2(VyEnum);     _assert_(vy_input);
+	Input2* vz_input = NULL;
+	if(dim==3){vz_input = element->GetInput2(VzEnum); _assert_(vz_input);}
 
 	/* Start  looping on the number of gaussian points: */
@@ -3581,8 +3606,8 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input=element->GetInput(VxEnum);     _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum);     _assert_(vy_input);
-	Input* vz_input;
-	if(dim==3){vz_input=element->GetInput(VzEnum); _assert_(vz_input);}
+	Input2* vx_input=element->GetInput2(VxEnum);     _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum);     _assert_(vy_input);
+	Input2* vz_input;
+	if(dim==3){vz_input=element->GetInput2(VzEnum); _assert_(vz_input);}
 
 	/* Start  looping on the number of gaussian points: */
@@ -3623,5 +3648,5 @@
 	/*If on water or not FS, skip stiffness: */
 	int approximation;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	if(approximation!=FSApproximationEnum && approximation!=SSAFSApproximationEnum && approximation!=HOFSApproximationEnum) return NULL;
 
@@ -3650,8 +3675,8 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input         = element->GetInput(VxEnum);      _assert_(vx_input);
-	Input* vy_input         = element->GetInput(VyEnum);      _assert_(vy_input);
-	Input* vz_input         = NULL;
-	if(dim==3){    vz_input = element->GetInput(VzEnum);      _assert_(vz_input);}
+	Input2* vx_input         = element->GetInput2(VxEnum);      _assert_(vx_input);
+	Input2* vy_input         = element->GetInput2(VyEnum);      _assert_(vy_input);
+	Input2* vz_input         = NULL;
+	if(dim==3){    vz_input = element->GetInput2(VzEnum);      _assert_(vz_input);}
 
 	/* Start  looping on the number of gaussian points: */
@@ -3728,5 +3753,5 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
-	Input*  alpha2_input=element->GetInput(FrictionCoefficientEnum); _assert_(alpha2_input);
+	Input2*  alpha2_input=element->GetInput2(FrictionCoefficientEnum); _assert_(alpha2_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -3782,7 +3807,7 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
-	Input*  sigmann_input=element->GetInput(VzEnum); _assert_(sigmann_input);
-	Input*  sigmant_input=element->GetInput(TemperatureEnum); _assert_(sigmant_input);
-	Input*  bedslope_input=element->GetInput(BedSlopeXEnum);     _assert_(bedslope_input);
+	Input2*  sigmann_input=element->GetInput2(VzEnum); _assert_(sigmann_input);
+	Input2*  sigmant_input=element->GetInput2(TemperatureEnum); _assert_(sigmant_input);
+	Input2*  bedslope_input=element->GetInput2(BedSlopeXEnum);     _assert_(bedslope_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -3899,5 +3924,5 @@
 	/*If on water or not FS, skip stiffness: */
 	int approximation;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	if(approximation!=FSApproximationEnum && approximation!=SSAFSApproximationEnum && approximation!=HOFSApproximationEnum) return NULL;
 
@@ -3927,5 +3952,5 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->FindParam(&friction_style,GroundinglineFrictionInterpolationEnum);
-	Input* gllevelset_input = NULL;
+	Input2* gllevelset_input = NULL;
 
 	/*build friction object, used later on: */
@@ -3936,5 +3961,5 @@
 	if(friction_style==SubelementFriction2Enum){
 		if(domaintype==Domain2DverticalEnum) _error_("Subelement Friction 2 not implemented yet for Flowline");
-		gllevelset_input=element->GetInput(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
+		gllevelset_input=element->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
 		element->GetGroundedPart(&point1,&fraction1,&fraction2,&mainlyfloating);
 		//gauss = element->NewGauss(point1,fraction1,fraction2,mainlyfloating,2);
@@ -4068,9 +4093,9 @@
 	IssmDouble  rho_ice =element->FindParam(MaterialsRhoIceEnum);
 	IssmDouble  gravity =element->FindParam(ConstantsGEnum);
-	Input*      loadingforcex_input=element->GetInput(LoadingforceXEnum);  _assert_(loadingforcex_input);
-	Input*      loadingforcey_input=element->GetInput(LoadingforceYEnum);  _assert_(loadingforcey_input);
-	Input*      loadingforcez_input=NULL;
+	Input2*      loadingforcex_input=element->GetInput2(LoadingforceXEnum);  _assert_(loadingforcex_input);
+	Input2*      loadingforcey_input=element->GetInput2(LoadingforceYEnum);  _assert_(loadingforcey_input);
+	Input2*      loadingforcez_input=NULL;
 	if(dim==3){
-		loadingforcez_input=element->GetInput(LoadingforceZEnum);  _assert_(loadingforcez_input);
+		loadingforcez_input=element->GetInput2(LoadingforceZEnum);  _assert_(loadingforcez_input);
 	}
 
@@ -4146,6 +4171,6 @@
 	element->GetIcefrontCoordinates(&xyz_list_front,xyz_list,MaskIceLevelsetEnum);
 	element->NormalSection(&normal[0],xyz_list_front);
-	Input* surface_input  = element->GetInput(SurfaceEnum); _assert_(surface_input);
-	Input* sealevel_input       = element->GetInput(SealevelEnum);       _assert_(sealevel_input);
+	Input2* surface_input  = element->GetInput2(SurfaceEnum); _assert_(surface_input);
+	Input2* sealevel_input       = element->GetInput2(SealevelEnum);       _assert_(sealevel_input);
 	IssmDouble  rho_water = element->FindParam(MaterialsRhoSeawaterEnum);
 	IssmDouble  gravity   = element->FindParam(ConstantsGEnum);
@@ -4215,5 +4240,5 @@
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
-	Input*      base_input=element->GetInput(BaseEnum); _assert_(base_input);
+	Input2*      base_input=element->GetInput2(BaseEnum); _assert_(base_input);
 	IssmDouble  rho_water=element->FindParam(MaterialsRhoSeawaterEnum);
 	IssmDouble  gravity  =element->FindParam(ConstantsGEnum);
@@ -4239,5 +4264,5 @@
 	element->FindParam(&shelf_dampening,StressbalanceShelfDampeningEnum);
 	if(shelf_dampening) {
-		Input*      mb_input=element->GetInput(BasalforcingsFloatingiceMeltingRateEnum); _assert_(mb_input);
+		Input2*      mb_input=element->GetInput2(BasalforcingsFloatingiceMeltingRateEnum); _assert_(mb_input);
 		IssmDouble  dt,mb;
 		element->FindParam(&dt,TimesteppingTimeStepEnum);
@@ -4290,6 +4315,6 @@
 
 	/*Get pressure and sigmann*/
-	Input* pressure_input=element->GetInput(PressureEnum); _assert_(pressure_input);
-	Input* sigmann_input =element->GetInput(SigmaNNEnum);  _assert_(sigmann_input);
+	Input2* pressure_input=element->GetInput2(PressureEnum); _assert_(pressure_input);
+	Input2* sigmann_input =element->GetInput2(SigmaNNEnum);  _assert_(sigmann_input);
 
 	gauss=element->NewGauss(5);
@@ -4383,19 +4408,19 @@
 
 	/*Get d and tau*/
-	Input* epsxx_input=element->GetInput(StrainRatexxEnum); _assert_(epsxx_input);
-	Input* epsyy_input=element->GetInput(StrainRateyyEnum); _assert_(epsyy_input);
-	Input* epsxy_input=element->GetInput(StrainRatexyEnum); _assert_(epsxy_input);
-	Input* epszz_input=NULL; Input* epsxz_input=NULL; Input* epsyz_input=NULL;
-	Input* sigmapxx_input=element->GetInput(DeviatoricStressxxEnum); _assert_(sigmapxx_input);
-	Input* sigmapyy_input=element->GetInput(DeviatoricStressyyEnum); _assert_(sigmapyy_input);
-	Input* sigmapxy_input=element->GetInput(DeviatoricStressxyEnum); _assert_(sigmapxy_input);
-	Input* sigmapzz_input=NULL; Input* sigmapxz_input=NULL; Input* sigmapyz_input=NULL;
+	Input2* epsxx_input=element->GetInput2(StrainRatexxEnum); _assert_(epsxx_input);
+	Input2* epsyy_input=element->GetInput2(StrainRateyyEnum); _assert_(epsyy_input);
+	Input2* epsxy_input=element->GetInput2(StrainRatexyEnum); _assert_(epsxy_input);
+	Input2* epszz_input=NULL; Input2* epsxz_input=NULL; Input2* epsyz_input=NULL;
+	Input2* sigmapxx_input=element->GetInput2(DeviatoricStressxxEnum); _assert_(sigmapxx_input);
+	Input2* sigmapyy_input=element->GetInput2(DeviatoricStressyyEnum); _assert_(sigmapyy_input);
+	Input2* sigmapxy_input=element->GetInput2(DeviatoricStressxyEnum); _assert_(sigmapxy_input);
+	Input2* sigmapzz_input=NULL; Input2* sigmapxz_input=NULL; Input2* sigmapyz_input=NULL;
 	if(dim==3){
-		epszz_input=element->GetInput(StrainRatezzEnum); _assert_(epszz_input);
-		epsxz_input=element->GetInput(StrainRatexzEnum); _assert_(epsxz_input);
-		epsyz_input=element->GetInput(StrainRateyzEnum); _assert_(epsyz_input);
-		sigmapzz_input=element->GetInput(DeviatoricStresszzEnum); _assert_(sigmapzz_input);
-		sigmapxz_input=element->GetInput(DeviatoricStressxzEnum); _assert_(sigmapxz_input);
-		sigmapyz_input=element->GetInput(DeviatoricStressyzEnum); _assert_(sigmapyz_input);
+		epszz_input=element->GetInput2(StrainRatezzEnum); _assert_(epszz_input);
+		epsxz_input=element->GetInput2(StrainRatexzEnum); _assert_(epsxz_input);
+		epsyz_input=element->GetInput2(StrainRateyzEnum); _assert_(epsyz_input);
+		sigmapzz_input=element->GetInput2(DeviatoricStresszzEnum); _assert_(sigmapzz_input);
+		sigmapxz_input=element->GetInput2(DeviatoricStressxzEnum); _assert_(sigmapxz_input);
+		sigmapyz_input=element->GetInput2(DeviatoricStressyzEnum); _assert_(sigmapyz_input);
 	}
 
@@ -5059,5 +5084,5 @@
 	int*         vdoflist=NULL;
 	int*         pdoflist=NULL;
-	Input*       vz_input=NULL;
+	Input2*       vz_input=NULL;
 	int          dim;
 	IssmDouble   vx,vy,vz,p;
@@ -5081,8 +5106,8 @@
 	element->GetDofListVelocity(&vdoflist,GsetEnum);
 	element->GetDofListPressure(&pdoflist,GsetEnum);
-	Input*     vx_input=element->GetInput(VxEnum);       _assert_(vx_input);
-	Input*     vy_input=element->GetInput(VyEnum);       _assert_(vy_input);
-	if(dim==3){vz_input=element->GetInput(VzEnum);       _assert_(vz_input);}
-	Input*     p_input =element->GetInput(PressureEnum); _assert_(p_input);
+	Input2*     vx_input=element->GetInput2(VxEnum);       _assert_(vx_input);
+	Input2*     vy_input=element->GetInput2(VyEnum);       _assert_(vy_input);
+	if(dim==3){vz_input=element->GetInput2(VzEnum);       _assert_(vz_input);}
+	Input2*     p_input =element->GetInput2(PressureEnum); _assert_(p_input);
 
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
@@ -5142,8 +5167,8 @@
 		/*Get inputs and parameters*/
 		element->GetVerticesCoordinates(&xyz_list);
-		Input* vx_input=element->GetInput(VxEnum); _assert_(vx_input);
-		Input* vy_input=element->GetInput(VyEnum); _assert_(vy_input);
-		Input* vz_input;
-		if(dim==3){vz_input=element->GetInput(VzEnum); _assert_(vz_input);}
+		Input2* vx_input=element->GetInput2(VxEnum); _assert_(vx_input);
+		Input2* vy_input=element->GetInput2(VyEnum); _assert_(vy_input);
+		Input2* vz_input;
+		if(dim==3){vz_input=element->GetInput2(VzEnum); _assert_(vz_input);}
 
 		/*Allocate new inputs*/
@@ -5184,11 +5209,11 @@
 
 		/*Add inputs*/
-		element->AddInput(StrainRatexxEnum,epsxx,P1DGEnum); element->AddInput(DeviatoricStressxxEnum,sigmapxx,P1DGEnum);
-		element->AddInput(StrainRateyyEnum,epsyy,P1DGEnum); element->AddInput(DeviatoricStressyyEnum,sigmapyy,P1DGEnum);
-		element->AddInput(StrainRatexyEnum,epsxy,P1DGEnum); element->AddInput(DeviatoricStressxyEnum,sigmapxy,P1DGEnum);
+		element->AddInput2(StrainRatexxEnum,epsxx,P1DGEnum); element->AddInput2(DeviatoricStressxxEnum,sigmapxx,P1DGEnum);
+		element->AddInput2(StrainRateyyEnum,epsyy,P1DGEnum); element->AddInput2(DeviatoricStressyyEnum,sigmapyy,P1DGEnum);
+		element->AddInput2(StrainRatexyEnum,epsxy,P1DGEnum); element->AddInput2(DeviatoricStressxyEnum,sigmapxy,P1DGEnum);
 		if(dim==3){
-			element->AddInput(StrainRatezzEnum,epszz,P1DGEnum); element->AddInput(DeviatoricStresszzEnum,sigmapzz,P1DGEnum);
-			element->AddInput(StrainRatexzEnum,epsxz,P1DGEnum); element->AddInput(DeviatoricStressxzEnum,sigmapxz,P1DGEnum);
-			element->AddInput(StrainRateyzEnum,epsyz,P1DGEnum); element->AddInput(DeviatoricStressyzEnum,sigmapyz,P1DGEnum);
+			element->AddInput2(StrainRatezzEnum,epszz,P1DGEnum); element->AddInput2(DeviatoricStresszzEnum,sigmapzz,P1DGEnum);
+			element->AddInput2(StrainRatexzEnum,epsxz,P1DGEnum); element->AddInput2(DeviatoricStressxzEnum,sigmapxz,P1DGEnum);
+			element->AddInput2(StrainRateyzEnum,epsyz,P1DGEnum); element->AddInput2(DeviatoricStressyzEnum,sigmapyz,P1DGEnum);
 		}
 
@@ -5273,12 +5298,12 @@
 
 	/*Add vx and vy as inputs to the tria element: */
-	int fe_v = element->VelocityInterpolation();
-	//if(fe_v == P1bubblecondensedEnum) fe_v = P1Enum;
-	//if(fe_v == P1bubbleEnum) fe_v = P1Enum;
-	element->AddInput(VxEnum, vx, fe_v);
-	element->AddInput(VyEnum, vy, fe_v);
-	element->AddInput(VelEnum,vel,fe_v);
-	if(pnumdof>0) element->AddInput(PressureEnum,pressure,element->PressureInterpolation());
-	if(dim==3) element->AddInput(VzEnum,vz, fe_v);
+	int v_interp =  element->VelocityInterpolation();
+	if(v_interp==P1bubbleEnum) v_interp=P1Enum;
+	if(v_interp == P1bubblecondensedEnum) v_interp = P1Enum;
+	element->AddInput2(VxEnum, vx, v_interp);
+	element->AddInput2(VyEnum, vy, v_interp);
+	element->AddInput2(VelEnum,vel,v_interp);
+	if(pnumdof>0) element->AddInput2(PressureEnum,pressure,element->PressureInterpolation());
+	if(dim==3) element->AddInput2(VzEnum,vz,v_interp);
 
 	/*Free ressources:*/
@@ -5314,10 +5339,10 @@
 		/*Get inputs and parameters*/
 		element->GetVerticesCoordinates(&xyz_list);
-		Input*  B_input=element->GetInput(MaterialsRheologyBEnum); _assert_(B_input);
-		Input*  n_input=element->GetInput(MaterialsRheologyNEnum); _assert_(n_input);
-		Input* vx_input=element->GetInput(VxEnum);                 _assert_(vx_input);
-		Input* vy_input=element->GetInput(VyEnum);                 _assert_(vy_input);
-		Input* vz_input;
-		if(dim==3){vz_input=element->GetInput(VzEnum); _assert_(vz_input);}
+		Input2*  B_input=element->GetInput2(MaterialsRheologyBEnum); _assert_(B_input);
+		Input2*  n_input=element->GetInput2(MaterialsRheologyNEnum); _assert_(n_input);
+		Input2* vx_input=element->GetInput2(VxEnum);                 _assert_(vx_input);
+		Input2* vy_input=element->GetInput2(VyEnum);                 _assert_(vy_input);
+		Input2* vz_input;
+		if(dim==3){vz_input=element->GetInput2(VzEnum); _assert_(vz_input);}
 
 		/*Fetch number of nodes and dof for this finite element*/
@@ -5338,23 +5363,23 @@
 
 		/*Get previous d*/
-		Input* epsxx_input=element->GetInput(StrainRatexxEnum); _assert_(epsxx_input);
-		Input* epsyy_input=element->GetInput(StrainRateyyEnum); _assert_(epsyy_input);
-		Input* epsxy_input=element->GetInput(StrainRatexyEnum); _assert_(epsxy_input);
-		Input* epszz_input=NULL; Input* epsxz_input=NULL; Input* epsyz_input=NULL;
+		Input2* epsxx_input=element->GetInput2(StrainRatexxEnum); _assert_(epsxx_input);
+		Input2* epsyy_input=element->GetInput2(StrainRateyyEnum); _assert_(epsyy_input);
+		Input2* epsxy_input=element->GetInput2(StrainRatexyEnum); _assert_(epsxy_input);
+		Input2* epszz_input=NULL; Input2* epsxz_input=NULL; Input2* epsyz_input=NULL;
 		if(dim==3){
-			epszz_input=element->GetInput(StrainRatezzEnum); _assert_(epszz_input);
-			epsxz_input=element->GetInput(StrainRatexzEnum); _assert_(epsxz_input);
-			epsyz_input=element->GetInput(StrainRateyzEnum); _assert_(epsyz_input);
+			epszz_input=element->GetInput2(StrainRatezzEnum); _assert_(epszz_input);
+			epsxz_input=element->GetInput2(StrainRatexzEnum); _assert_(epsxz_input);
+			epsyz_input=element->GetInput2(StrainRateyzEnum); _assert_(epsyz_input);
 		}
 
 		/*Get tau*/
-		Input* sigmapxx_input=element->GetInput(DeviatoricStressxxEnum); _assert_(sigmapxx_input);
-		Input* sigmapyy_input=element->GetInput(DeviatoricStressyyEnum); _assert_(sigmapyy_input);
-		Input* sigmapxy_input=element->GetInput(DeviatoricStressxyEnum); _assert_(sigmapxy_input);
-		Input* sigmapzz_input=NULL; Input* sigmapxz_input=NULL; Input* sigmapyz_input=NULL;
+		Input2* sigmapxx_input=element->GetInput2(DeviatoricStressxxEnum); _assert_(sigmapxx_input);
+		Input2* sigmapyy_input=element->GetInput2(DeviatoricStressyyEnum); _assert_(sigmapyy_input);
+		Input2* sigmapxy_input=element->GetInput2(DeviatoricStressxyEnum); _assert_(sigmapxy_input);
+		Input2* sigmapzz_input=NULL; Input2* sigmapxz_input=NULL; Input2* sigmapyz_input=NULL;
 		if(dim==3){
-			sigmapzz_input=element->GetInput(DeviatoricStresszzEnum); _assert_(sigmapzz_input);
-			sigmapxz_input=element->GetInput(DeviatoricStressxzEnum); _assert_(sigmapxz_input);
-			sigmapyz_input=element->GetInput(DeviatoricStressyzEnum); _assert_(sigmapyz_input);
+			sigmapzz_input=element->GetInput2(DeviatoricStresszzEnum); _assert_(sigmapzz_input);
+			sigmapxz_input=element->GetInput2(DeviatoricStressxzEnum); _assert_(sigmapxz_input);
+			sigmapyz_input=element->GetInput2(DeviatoricStressyzEnum); _assert_(sigmapyz_input);
 		}
 
@@ -5470,7 +5495,7 @@
 			for(int i=0;i<3;i++) _assert_(!xIsNan<IssmDouble>(d_yy[i]));
 			for(int i=0;i<3;i++) _assert_(!xIsNan<IssmDouble>(d_xx[i]));
-			element->AddInput(StrainRatexxEnum,d_xx,P1DGEnum);
-			element->AddInput(StrainRateyyEnum,d_yy,P1DGEnum);
-			element->AddInput(StrainRatexyEnum,d_xy,P1DGEnum);
+			element->AddInput2(StrainRatexxEnum,d_xx,P1DGEnum);
+			element->AddInput2(StrainRateyyEnum,d_yy,P1DGEnum);
+			element->AddInput2(StrainRatexyEnum,d_xy,P1DGEnum);
 		}
 		else{
@@ -5485,10 +5510,10 @@
 			Matrix4x4Solve(&d_xz[0],Ke,pe_xz);
 			Matrix4x4Solve(&d_yz[0],Ke,pe_yz);
-			element->AddInput(StrainRatexxEnum,d_xx,P1DGEnum);
-			element->AddInput(StrainRateyyEnum,d_yy,P1DGEnum);
-			element->AddInput(StrainRatexyEnum,d_xy,P1DGEnum);
-			element->AddInput(StrainRatezzEnum,d_zz,P1DGEnum);
-			element->AddInput(StrainRatexzEnum,d_xz,P1DGEnum);
-			element->AddInput(StrainRateyzEnum,d_yz,P1DGEnum);
+			element->AddInput2(StrainRatexxEnum,d_xx,P1DGEnum);
+			element->AddInput2(StrainRateyyEnum,d_yy,P1DGEnum);
+			element->AddInput2(StrainRatexyEnum,d_xy,P1DGEnum);
+			element->AddInput2(StrainRatezzEnum,d_zz,P1DGEnum);
+			element->AddInput2(StrainRatexzEnum,d_xz,P1DGEnum);
+			element->AddInput2(StrainRateyzEnum,d_yz,P1DGEnum);
 		}
 
@@ -5527,29 +5552,29 @@
 		/*Get inputs and parameters*/
 		element->GetVerticesCoordinates(&xyz_list);
-		Input* vx_input=element->GetInput(VxEnum);                 _assert_(vx_input);
-		Input* vy_input=element->GetInput(VyEnum);                 _assert_(vy_input);
-		Input* vz_input=NULL;
-		if(dim==3){vz_input=element->GetInput(VzEnum); _assert_(vz_input);}
+		Input2* vx_input=element->GetInput2(VxEnum);                 _assert_(vx_input);
+		Input2* vy_input=element->GetInput2(VyEnum);                 _assert_(vy_input);
+		Input2* vz_input=NULL;
+		if(dim==3){vz_input=element->GetInput2(VzEnum); _assert_(vz_input);}
 
 		/*Get previous tau*/
-		Input* sigmapxx_input=element->GetInput(DeviatoricStressxxEnum); _assert_(sigmapxx_input);
-		Input* sigmapyy_input=element->GetInput(DeviatoricStressyyEnum); _assert_(sigmapyy_input);
-		Input* sigmapxy_input=element->GetInput(DeviatoricStressxyEnum); _assert_(sigmapxy_input);
-		Input* sigmapzz_input=NULL; Input* sigmapxz_input=NULL; Input* sigmapyz_input=NULL;
+		Input2* sigmapxx_input=element->GetInput2(DeviatoricStressxxEnum); _assert_(sigmapxx_input);
+		Input2* sigmapyy_input=element->GetInput2(DeviatoricStressyyEnum); _assert_(sigmapyy_input);
+		Input2* sigmapxy_input=element->GetInput2(DeviatoricStressxyEnum); _assert_(sigmapxy_input);
+		Input2* sigmapzz_input=NULL; Input2* sigmapxz_input=NULL; Input2* sigmapyz_input=NULL;
 		if(dim==3){
-			sigmapzz_input=element->GetInput(DeviatoricStresszzEnum); _assert_(sigmapzz_input);
-			sigmapxz_input=element->GetInput(DeviatoricStressxzEnum); _assert_(sigmapxz_input);
-			sigmapyz_input=element->GetInput(DeviatoricStressyzEnum); _assert_(sigmapyz_input);
+			sigmapzz_input=element->GetInput2(DeviatoricStresszzEnum); _assert_(sigmapzz_input);
+			sigmapxz_input=element->GetInput2(DeviatoricStressxzEnum); _assert_(sigmapxz_input);
+			sigmapyz_input=element->GetInput2(DeviatoricStressyzEnum); _assert_(sigmapyz_input);
 		}
 
 		/*Get NEW d*/
-		Input* epsxx_input=element->GetInput(StrainRatexxEnum); _assert_(epsxx_input);
-		Input* epsyy_input=element->GetInput(StrainRateyyEnum); _assert_(epsyy_input);
-		Input* epsxy_input=element->GetInput(StrainRatexyEnum); _assert_(epsxy_input);
-		Input* epszz_input=NULL; Input* epsxz_input=NULL; Input* epsyz_input=NULL;
+		Input2* epsxx_input=element->GetInput2(StrainRatexxEnum); _assert_(epsxx_input);
+		Input2* epsyy_input=element->GetInput2(StrainRateyyEnum); _assert_(epsyy_input);
+		Input2* epsxy_input=element->GetInput2(StrainRatexyEnum); _assert_(epsxy_input);
+		Input2* epszz_input=NULL; Input2* epsxz_input=NULL; Input2* epsyz_input=NULL;
 		if(dim==3){
-			epszz_input=element->GetInput(StrainRatezzEnum); _assert_(epszz_input);
-			epsxz_input=element->GetInput(StrainRatexzEnum); _assert_(epsxz_input);
-			epsyz_input=element->GetInput(StrainRateyzEnum); _assert_(epsyz_input);
+			epszz_input=element->GetInput2(StrainRatezzEnum); _assert_(epszz_input);
+			epsxz_input=element->GetInput2(StrainRatexzEnum); _assert_(epsxz_input);
+			epsyz_input=element->GetInput2(StrainRateyzEnum); _assert_(epsyz_input);
 		}
 
@@ -5620,11 +5645,11 @@
 
 		/*Add inputs*/
-		element->AddInput(DeviatoricStressxxEnum,tau_xx,P1DGEnum);
-		element->AddInput(DeviatoricStressyyEnum,tau_yy,P1DGEnum);
-		element->AddInput(DeviatoricStressxyEnum,tau_xy,P1DGEnum);
+		element->AddInput2(DeviatoricStressxxEnum,tau_xx,P1DGEnum);
+		element->AddInput2(DeviatoricStressyyEnum,tau_yy,P1DGEnum);
+		element->AddInput2(DeviatoricStressxyEnum,tau_xy,P1DGEnum);
 		if(dim==3){
-			element->AddInput(DeviatoricStresszzEnum,tau_zz,P1DGEnum);
-			element->AddInput(DeviatoricStressxzEnum,tau_xz,P1DGEnum);
-			element->AddInput(DeviatoricStressyzEnum,tau_yz,P1DGEnum);
+			element->AddInput2(DeviatoricStresszzEnum,tau_zz,P1DGEnum);
+			element->AddInput2(DeviatoricStressxzEnum,tau_xz,P1DGEnum);
+			element->AddInput2(DeviatoricStressyzEnum,tau_yz,P1DGEnum);
 		}
 
@@ -5752,5 +5777,5 @@
 
 	/*If on water or not FS, skip stiffness: */
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	if(element->IsFloating() || !element->IsOnBase()) return NULL;
 
@@ -5784,7 +5809,7 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_tria);
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input=element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input=element->GetInput(VzEnum); _assert_(vz_input);
+	Input2* vx_input=element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input=element->GetInput2(VzEnum); _assert_(vz_input);
 
 	/*build friction object, used later on: */
@@ -5905,7 +5930,7 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input=element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input=element->GetInput(VzEnum); _assert_(vz_input);
+	Input2* vx_input=element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input=element->GetInput2(VzEnum); _assert_(vz_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -6092,6 +6117,6 @@
 	/* Get node coordinates and dof list: */
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input   =element->GetInput(VxEnum);       _assert_(vx_input);
-	Input* vy_input   =element->GetInput(VyEnum);       _assert_(vy_input);
+	Input2* vx_input   =element->GetInput2(VxEnum);       _assert_(vx_input);
+	Input2* vy_input   =element->GetInput2(VyEnum);       _assert_(vy_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -6245,11 +6270,11 @@
 	/*Initialize Element matrix*/
 	ElementMatrix* Ke=basaltria->NewElementMatrix(SSAApproximationEnum);
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 
 	/*Retrieve all inputs and parameters*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input   =element->GetInput(VxEnum);       _assert_(vx_input);
-	Input* vy_input   =element->GetInput(VyEnum);       _assert_(vy_input);
-	Input* vz_input   =element->GetInput(VzEnum);       _assert_(vz_input);
+	Input2* vx_input   =element->GetInput2(VxEnum);       _assert_(vx_input);
+	Input2* vy_input   =element->GetInput2(VyEnum);       _assert_(vy_input);
+	Input2* vz_input   =element->GetInput2(VzEnum);       _assert_(vz_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -6326,5 +6351,5 @@
 	/*Initialize Element vector and return if necessary*/
 	if(!element->IsOnBase() || element->IsFloating()) return NULL;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	if(approximation!=HOFSApproximationEnum) return NULL;
 
@@ -6351,8 +6376,8 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_tria);
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input=  element->GetInput(VxEnum);   _assert_(vx_input);
-	Input* vy_input=  element->GetInput(VyEnum);   _assert_(vy_input);
-	Input* vz_input=  element->GetInput(VzEnum);   _assert_(vz_input);
-	Input* vzHO_input=element->GetInput(VzHOEnum); _assert_(vzHO_input);
+	Input2* vx_input=  element->GetInput2(VxEnum);   _assert_(vx_input);
+	Input2* vy_input=  element->GetInput2(VyEnum);   _assert_(vy_input);
+	Input2* vz_input=  element->GetInput2(VzEnum);   _assert_(vz_input);
+	Input2* vzHO_input=element->GetInput2(VzHOEnum); _assert_(vzHO_input);
 
 	/*build friction object, used later on: */
@@ -6409,5 +6434,5 @@
 
 	/*Initialize Element vector and return if necessary*/
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	if(approximation!=HOFSApproximationEnum) return NULL;
 	int   vnumnodes = element->NumberofNodesVelocity();
@@ -6430,8 +6455,8 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input   =element->GetInput(VxEnum);   _assert_(vx_input);
-	Input* vy_input   =element->GetInput(VyEnum);   _assert_(vy_input);
-	Input* vz_input   =element->GetInput(VzEnum);   _assert_(vz_input);
-	Input* vzHO_input=element->GetInput(VzHOEnum);  _assert_(vzHO_input);
+	Input2* vx_input   =element->GetInput2(VxEnum);   _assert_(vx_input);
+	Input2* vy_input   =element->GetInput2(VyEnum);   _assert_(vy_input);
+	Input2* vz_input   =element->GetInput2(VzEnum);   _assert_(vz_input);
+	Input2* vzHO_input=element->GetInput2(VzHOEnum);  _assert_(vzHO_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -6496,5 +6521,5 @@
 	/*Initialize Element vector and return if necessary*/
 	if(!element->IsOnBase() || element->IsFloating()) return NULL;
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	if(approximation!=SSAFSApproximationEnum) return NULL;
 	int vnumnodes = element->NumberofNodesVelocity();
@@ -6518,8 +6543,8 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_tria);
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input=   element->GetInput(VxEnum);    _assert_(vx_input);
-	Input* vy_input=   element->GetInput(VyEnum);    _assert_(vy_input);
-	Input* vz_input=   element->GetInput(VzEnum);    _assert_(vz_input);
-	Input* vzSSA_input=element->GetInput(VzSSAEnum); _assert_(vzSSA_input);
+	Input2* vx_input=   element->GetInput2(VxEnum);    _assert_(vx_input);
+	Input2* vy_input=   element->GetInput2(VyEnum);    _assert_(vy_input);
+	Input2* vz_input=   element->GetInput2(VzEnum);    _assert_(vz_input);
+	Input2* vzSSA_input=element->GetInput2(VzSSAEnum); _assert_(vzSSA_input);
 
 	/*build friction object, used later on: */
@@ -6575,5 +6600,5 @@
 
 	/*Initialize Element vector and return if necessary*/
-	element->GetInputValue(&approximation,ApproximationEnum);
+	element->GetInput2Value(&approximation,ApproximationEnum);
 	if(approximation!=SSAFSApproximationEnum) return NULL;
 	int vnumnodes = element->NumberofNodesVelocity();
@@ -6596,8 +6621,8 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->FindParam(&FSreconditioning,StressbalanceFSreconditioningEnum);
-	Input* vx_input   =element->GetInput(VxEnum);      _assert_(vx_input);
-	Input* vy_input   =element->GetInput(VyEnum);      _assert_(vy_input);
-	Input* vz_input   =element->GetInput(VzEnum);      _assert_(vz_input);
-	Input* vzSSA_input=element->GetInput(VzSSAEnum);   _assert_(vzSSA_input);
+	Input2* vx_input   =element->GetInput2(VxEnum);      _assert_(vx_input);
+	Input2* vy_input   =element->GetInput2(VyEnum);      _assert_(vy_input);
+	Input2* vz_input   =element->GetInput2(VzEnum);      _assert_(vz_input);
+	Input2* vzSSA_input=element->GetInput2(VzSSAEnum);   _assert_(vzSSA_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -7192,10 +7217,10 @@
 
 	/*Add vx and vy as inputs to element: */
-	element->AddInput(VxEnum,vx,P1Enum);
-	element->AddInput(VyEnum,vy,P1Enum);
-	element->AddInput(VzEnum,vz,P1Enum);
-	element->AddInput(VzFSEnum,vzFS,P1Enum);
-	element->AddInput(VelEnum,vel,P1Enum);
-	element->AddInput(PressureEnum,pressure,P1Enum);
+	element->AddInput2(VxEnum,vx,P1Enum);
+	element->AddInput2(VyEnum,vy,P1Enum);
+	element->AddInput2(VzEnum,vz,P1Enum);
+	element->AddInput2(VzFSEnum,vzFS,P1Enum);
+	element->AddInput2(VelEnum,vel,P1Enum);
+	element->AddInput2(PressureEnum,pressure,P1Enum);
 
 	/*Free ressources:*/
@@ -7295,10 +7320,10 @@
 
 	/*Add vx and vy as inputs to element: */
-	element->AddInput(VxEnum,vx,P1Enum);
-	element->AddInput(VyEnum,vy,P1Enum);
-	element->AddInput(VzEnum,vz,P1Enum);
-	element->AddInput(VzFSEnum,vzFS,P1Enum);
-	element->AddInput(VelEnum,vel,P1Enum);
-	element->AddInput(PressureEnum,pressure,P1Enum);
+	element->AddInput2(VxEnum,vx,P1Enum);
+	element->AddInput2(VyEnum,vy,P1Enum);
+	element->AddInput2(VzEnum,vz,P1Enum);
+	element->AddInput2(VzFSEnum,vzFS,P1Enum);
+	element->AddInput2(VelEnum,vel,P1Enum);
+	element->AddInput2(PressureEnum,pressure,P1Enum);
 
 	/*Free ressources:*/
@@ -7388,8 +7413,8 @@
 
 	/*Add vx and vy as inputs to element: */
-	element->AddInput(VxEnum,vx,P1Enum);
-	element->AddInput(VyEnum,vy,P1Enum);
-	element->AddInput(VelEnum,vel,P1Enum);
-	element->AddInput(PressureEnum,pressure,P1Enum);
+	element->AddInput2(VxEnum,vx,P1Enum);
+	element->AddInput2(VyEnum,vy,P1Enum);
+	element->AddInput2(VelEnum,vel,P1Enum);
+	element->AddInput2(PressureEnum,pressure,P1Enum);
 
 	/*Free ressources:*/
Index: /issm/trunk/src/c/analyses/StressbalanceAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/StressbalanceAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/StressbalanceAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.cpp	(revision 24686)
@@ -107,5 +107,5 @@
 	return 2;
 }/*}}}*/
-void StressbalanceSIAAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void StressbalanceSIAAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Fetch data needed: */
@@ -127,25 +127,32 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
+			/*Need to know the type of approximation for this element*/
+			if(iomodel->Data("md.flowequation.element_equation")){
+				inputs2->SetInput(ApproximationEnum,counter,IoCodeToEnumElementEquation(reCast<int>(iomodel->Data("md.flowequation.element_equation")[i])));
+			}
 			counter++;
 		}
 	}
+
+	/*Free data: */
+	iomodel->DeleteData(1,"md.flowequation.element_equation");
 
 	/*Friction law variables*/
 	switch(frictionlaw){
 		case 1:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			break;
 		case 2:
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
 			break;
 		case 6:
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
 			break;
 		default:
@@ -153,13 +160,11 @@
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
 	if(ismovingfront){
 		if(iomodel->domaintype!=Domain2DhorizontalEnum)
-			iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum); // required for updating active nodes
-	}
-
-	/*Free data: */
-	iomodel->DeleteData(1,"md.flowequation.element_equation");
+			iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum); // required for updating active nodes
+	}
+
 }/*}}}*/
 void StressbalanceSIAAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -321,13 +326,13 @@
 	IssmDouble  gravity    = element->FindParam(ConstantsGEnum);
 	IssmDouble  B,n;
-	Input* B_input         = element->GetInput(MaterialsRheologyBbarEnum);_assert_(B_input);
-	Input* n_input         = element->GetInput(MaterialsRheologyNEnum);   _assert_(n_input);
-	Input* slopex_input    = element->GetInput(SurfaceSlopeXEnum);        _assert_(slopex_input);
-	Input* slopey_input    = element->GetInput(SurfaceSlopeYEnum);        _assert_(slopey_input);
-	Input* thickness_input = element->GetInput(ThicknessEnum);            _assert_(thickness_input);
-	Input* surface_input   = element->GetInput(SurfaceEnum);              _assert_(surface_input);
-	Input* drag_input      = NULL;
+	Input2* B_input         = element->GetInput2(MaterialsRheologyBbarEnum);_assert_(B_input);
+	Input2* n_input         = element->GetInput2(MaterialsRheologyNEnum);   _assert_(n_input);
+	Input2* slopex_input    = element->GetInput2(SurfaceSlopeXEnum);        _assert_(slopex_input);
+	Input2* slopey_input    = element->GetInput2(SurfaceSlopeYEnum);        _assert_(slopey_input);
+	Input2* thickness_input = element->GetInput2(ThicknessEnum);            _assert_(thickness_input);
+	Input2* surface_input   = element->GetInput2(SurfaceEnum);              _assert_(surface_input);
+	Input2* drag_input      = NULL;
 	if(frictionlaw!=5 && frictionlaw!=1){
-		drag_input = element->GetInput(FrictionCoefficientEnum);  _assert_(drag_input);
+		drag_input = element->GetInput2(FrictionCoefficientEnum);  _assert_(drag_input);
 	}
 
@@ -411,14 +416,14 @@
 	IssmDouble  gravity    = element->FindParam(ConstantsGEnum);
 	IssmDouble B,n;
-	Input* B_input         = element->GetInput(MaterialsRheologyBEnum);   _assert_(B_input);
-	Input* n_input         = element->GetInput(MaterialsRheologyNEnum);   _assert_(n_input);
-	Input* surface_input   = element->GetInput(SurfaceEnum);              _assert_(surface_input);
-	Input* slopex_input    = element->GetInput(SurfaceSlopeXEnum);        _assert_(slopex_input);
-	Input* slopey_input    = element->GetInput(SurfaceSlopeYEnum);        _assert_(slopey_input);
-	Input* thickness_input = element->GetInput(ThicknessEnum);            _assert_(thickness_input);
-	Input* drag_input      = NULL;
+	Input2* B_input         = element->GetInput2(MaterialsRheologyBEnum);   _assert_(B_input);
+	Input2* n_input         = element->GetInput2(MaterialsRheologyNEnum);   _assert_(n_input);
+	Input2* surface_input   = element->GetInput2(SurfaceEnum);              _assert_(surface_input);
+	Input2* slopex_input    = element->GetInput2(SurfaceSlopeXEnum);        _assert_(slopex_input);
+	Input2* slopey_input    = element->GetInput2(SurfaceSlopeYEnum);        _assert_(slopey_input);
+	Input2* thickness_input = element->GetInput2(ThicknessEnum);            _assert_(thickness_input);
+	Input2* drag_input      = NULL;
 	Friction* friction     = NULL;
 	if(frictionlaw!=5 && frictionlaw!=1){
-		drag_input = element->GetInput(FrictionCoefficientEnum);  _assert_(drag_input);
+		drag_input = element->GetInput2(FrictionCoefficientEnum);  _assert_(drag_input);
 	}
 	else if(frictionlaw==5){
@@ -530,6 +535,6 @@
 	/*Get dof list and inputs */
 	element->GetDofList(&doflist,NoneApproximationEnum,GsetEnum);
-	Input* vx_input=element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum); _assert_(vy_input);
+	Input2* vx_input=element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum); _assert_(vy_input);
 
 	/*Ok, we have the velocities in inputs, fill in solution */
@@ -617,8 +622,8 @@
 
 	/*Add vx and vy as inputs to the tria element: */
-	element->AddInput(VxEnum,vx,P1Enum);
-	element->AddInput(VyEnum,vy,P1Enum);
-	element->AddInput(VelEnum,vel,P1Enum);
-	element->AddInput(PressureEnum,pressure,P1Enum);
+	element->AddInput2(VxEnum,vx,P1Enum);
+	element->AddInput2(VyEnum,vy,P1Enum);
+	element->AddInput2(VelEnum,vel,P1Enum);
+	element->AddInput2(PressureEnum,pressure,P1Enum);
 
 	/*Free ressources:*/
Index: /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/StressbalanceSIAAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/StressbalanceVerticalAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/StressbalanceVerticalAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/StressbalanceVerticalAnalysis.cpp	(revision 24686)
@@ -86,5 +86,5 @@
 	return 1;
 }/*}}}*/
-void StressbalanceVerticalAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void StressbalanceVerticalAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*return if not 3d mesh*/
@@ -96,20 +96,20 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,P1Enum);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,P1Enum);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	}
-	iomodel->FetchDataToInput(elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
-	//iomodel->FetchDataToInput(elements,"md.smb.mass_balance",SmbMassBalanceEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	}
+	iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.groundedice_melting_rate",BasalforcingsGroundediceMeltingRateEnum);
+	//iomodel->FetchDataToInput(inputs2,elements,"md.smb.mass_balance",SmbMassBalanceEnum);
 
 
@@ -119,5 +119,5 @@
 	switch(basalforcing_model){
 		case FloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.floatingice_melting_rate",BasalforcingsFloatingiceMeltingRateEnum);
 			break;
 		case LinearFloatingMeltRateEnum:
@@ -128,23 +128,23 @@
 			break;
 		case SpatialLinearFloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.deepwater_melting_rate",BasalforcingsDeepwaterMeltingRateEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.deepwater_elevation",BasalforcingsDeepwaterElevationEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.upperwater_elevation",BasalforcingsUpperwaterElevationEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.deepwater_melting_rate",BasalforcingsDeepwaterMeltingRateEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.deepwater_elevation",BasalforcingsDeepwaterElevationEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.upperwater_elevation",BasalforcingsUpperwaterElevationEnum);
 			break;
 		case BasalforcingsPicoEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.basin_id",BasalforcingsPicoBasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.basin_id",BasalforcingsPicoBasinIdEnum);
 			break;
 		case BasalforcingsIsmip6Enum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.basin_id",BasalforcingsIsmip6BasinIdEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.basin_id",BasalforcingsIsmip6BasinIdEnum);
 			break;
 		case BeckmannGoosseFloatingMeltRateEnum:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.ocean_salinity",BasalforcingsOceanSalinityEnum);
-			iomodel->FetchDataToInput(elements,"md.basalforcings.ocean_temp",BasalforcingsOceanTempEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.ocean_salinity",BasalforcingsOceanSalinityEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.ocean_temp",BasalforcingsOceanTempEnum);
 			break;
 		default:
 			_error_("Basal forcing model "<<EnumToStringx(basalforcing_model)<<" not supported yet");
 	}
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum,0.);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum,0.);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum,0.);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum,0.);
 }/*}}}*/
 void StressbalanceVerticalAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -353,13 +353,13 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->GetInputValue(&approximation,ApproximationEnum);
-	Input* base_input=element->GetInput(BaseEnum);                                               _assert_(base_input);
-	Input* groundedice_input=element->GetInput(MaskGroundediceLevelsetEnum);                     _assert_(groundedice_input);
-	Input* groundedice_melting_input=element->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(groundedice_melting_input);
-	Input* floatingice_melting_input=element->GetInput(BasalforcingsFloatingiceMeltingRateEnum); _assert_(floatingice_melting_input);
-	Input* vx_input=element->GetInput(VxEnum);                                                   _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum);                                                   _assert_(vy_input);
-	Input* vzFS_input=NULL;
+	Input2* base_input=element->GetInput2(BaseEnum);                                               _assert_(base_input);
+	Input2* groundedice_input=element->GetInput2(MaskGroundediceLevelsetEnum);                     _assert_(groundedice_input);
+	Input2* groundedice_melting_input=element->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(groundedice_melting_input);
+	Input2* floatingice_melting_input=element->GetInput2(BasalforcingsFloatingiceMeltingRateEnum); _assert_(floatingice_melting_input);
+	Input2* vx_input=element->GetInput2(VxEnum);                                                   _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum);                                                   _assert_(vy_input);
+	Input2* vzFS_input=NULL;
 	if(approximation==HOFSApproximationEnum || approximation==SSAFSApproximationEnum){
-		vzFS_input=element->GetInput(VzFSEnum);       _assert_(vzFS_input);
+		vzFS_input=element->GetInput2(VzFSEnum);       _assert_(vzFS_input);
 	}
 
@@ -419,11 +419,11 @@
 	element->GetVerticesCoordinatesTop(&xyz_list_surface);
 	element->GetInputValue(&approximation,ApproximationEnum);
-	Input* surface_input    =element->GetInput(SurfaceEnum);               _assert_(surface_input);
-	Input* smb_input=element->GetInput(SmbMassBalanceEnum);    _assert_(smb_input);
-	Input* vx_input=element->GetInput(VxEnum);                             _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum);                             _assert_(vy_input);
-	Input* vzFS_input=NULL;
+	Input2* surface_input    =element->GetInput2(SurfaceEnum);   _assert_(surface_input);
+	Input2* smb_input=element->GetInput2(SmbMassBalanceEnum);    _assert_(smb_input);
+	Input2* vx_input=element->GetInput2(VxEnum);                 _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum);                 _assert_(vy_input);
+	Input2* vzFS_input=NULL;
 	if(approximation==HOFSApproximationEnum || approximation==SSAFSApproximationEnum){
-		vzFS_input=element->GetInput(VzFSEnum);       _assert_(vzFS_input);
+		vzFS_input=element->GetInput2(VzFSEnum); _assert_(vzFS_input);
 	}
 
@@ -474,9 +474,9 @@
 	element->GetVerticesCoordinates(&xyz_list);
 	element->GetInputValue(&approximation,ApproximationEnum);
-	Input* vx_input=element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum); _assert_(vy_input);
-	Input* vzFS_input=NULL;
+	Input2* vx_input=element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vzFS_input=NULL;
 	if(approximation==HOFSApproximationEnum || approximation==SSAFSApproximationEnum){
-		vzFS_input=element->GetInput(VzFSEnum); _assert_(vzFS_input);
+		vzFS_input=element->GetInput2(VzFSEnum); _assert_(vzFS_input);
 	}
 
@@ -561,7 +561,7 @@
 	/*Do some modifications if we actually have a HOFS or SSAFS element*/
 	if(approximation==HOFSApproximationEnum){
-		Input* vzFS_input=element->GetInput(VzFSEnum);
+		Input2* vzFS_input=element->GetInput2(VzFSEnum);
 		if (vzFS_input){
-			if (vzFS_input->ObjectEnum()!=PentaInputEnum) _error_("Cannot compute Vel as VzFS is of type " << EnumToStringx(vzFS_input->ObjectEnum()));
+			if (vzFS_input->ObjectEnum()!=PentaInput2Enum) _error_("Cannot compute Vel as VzFS is of type " << EnumToStringx(vzFS_input->ObjectEnum()));
 			element->GetInputListOnNodes(&vzFS[0],VzFSEnum,0.);
 		}
@@ -573,7 +573,7 @@
 	}
 	else if(approximation==SSAFSApproximationEnum){
-		Input* vzFS_input=element->GetInput(VzFSEnum);
+		Input2* vzFS_input=element->GetInput2(VzFSEnum);
 		if (vzFS_input){
-			if (vzFS_input->ObjectEnum()!=PentaInputEnum) _error_("Cannot compute Vel as VzFS is of type " << EnumToStringx(vzFS_input->ObjectEnum()));
+			if (vzFS_input->ObjectEnum()!=PentaInput2Enum) _error_("Cannot compute Vel as VzFS is of type " << EnumToStringx(vzFS_input->ObjectEnum()));
 			element->GetInputListOnNodes(&vzFS[0],VzFSEnum,0.);
 		}
@@ -597,14 +597,14 @@
 	}
 	if(approximation!=HOFSApproximationEnum && approximation!=SSAFSApproximationEnum){
-		element->AddInput(PressureEnum,pressure,element->GetElementType());
+		element->AddInput2(PressureEnum,pressure,element->GetElementType());
 	}
 	else if(approximation==HOFSApproximationEnum){
-		element->AddInput(VzHOEnum,vzHO,P1Enum);
+		element->AddInput2(VzHOEnum,vzHO,P1Enum);
 	}
 	else if(approximation==SSAFSApproximationEnum){
-		element->AddInput(VzSSAEnum,vzSSA,P1Enum);
-	}
-	element->AddInput(VzEnum,vz,P1Enum);
-	element->AddInput(VelEnum,vel,P1Enum);
+		element->AddInput2(VzSSAEnum,vzSSA,P1Enum);
+	}
+	element->AddInput2(VzEnum,vz,P1Enum);
+	element->AddInput2(VelEnum,vel,P1Enum);
 
 	/*Free ressources:*/
Index: /issm/trunk/src/c/analyses/StressbalanceVerticalAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/StressbalanceVerticalAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/StressbalanceVerticalAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/ThermalAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/ThermalAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/ThermalAnalysis.cpp	(revision 24686)
@@ -104,5 +104,5 @@
 	return 1;
 }/*}}}*/
-void ThermalAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void ThermalAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	int frictionlaw,basalforcing_model,materialstype;
@@ -119,5 +119,5 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
@@ -130,41 +130,41 @@
 	iomodel->FindConstant(&materialstype,"md.materials.type");
 
-	iomodel->FetchDataToInput(elements,"md.geometry.thickness",ThicknessEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.surface",SurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.geometry.base",BaseEnum);
-	iomodel->FetchDataToInput(elements,"md.slr.sealevel",SealevelEnum,0);
-	iomodel->FetchDataToInput(elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
-	iomodel->FetchDataToInput(elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.thickness",ThicknessEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.surface",SurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.geometry.base",BaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.slr.sealevel",SealevelEnum,0);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.ice_levelset",MaskIceLevelsetEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mask.groundedice_levelset",MaskGroundediceLevelsetEnum);
 	if(iomodel->domaintype!=Domain2DhorizontalEnum){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	}
-	iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
-	iomodel->FetchDataToInput(elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum);
-	iomodel->FetchDataToInput(elements,"md.initialization.vz",VzEnum);
-	InputUpdateFromConstantx(elements,0.,VxMeshEnum);
-	InputUpdateFromConstantx(elements,0.,VyMeshEnum);
-	InputUpdateFromConstantx(elements,0.,VzMeshEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	}
+	iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonsurface",MeshVertexonsurfaceEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vz",VzEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,VxMeshEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,VyMeshEnum);
+	InputUpdateFromConstantx(inputs2,elements,0.,VzMeshEnum);
 
 	/*Rheology type*/
-	iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
 	switch(materialstype){
 		case MatenhancediceEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_E",MaterialsRheologyEEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_E",MaterialsRheologyEEnum);
 			break;
 		case MatdamageiceEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
 			break;
 		case MatestarEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_Ec",MaterialsRheologyEcEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_Es",MaterialsRheologyEsEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_Ec",MaterialsRheologyEcEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_Es",MaterialsRheologyEsEnum);
 			break;
 		case MaticeEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
 			break;
 		default:
@@ -172,5 +172,5 @@
 	}
 	if(ismovingfront){
-		iomodel->FetchDataToInput(elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum); // required for updating active nodes
+		iomodel->FetchDataToInput(inputs2,elements,"md.mesh.vertexonbase",MeshVertexonbaseEnum); // required for updating active nodes
 	}
 	/*Basal forcings variables*/
@@ -180,5 +180,5 @@
 			break;
 		default:
-			iomodel->FetchDataToInput(elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.basalforcings.geothermalflux",BasalforcingsGeothermalfluxEnum);
 			break;
 	}
@@ -187,65 +187,65 @@
 		case 1:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 			}
 			break;
 		case 2:
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
 			break;
 		case 3:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.As",FrictionAsEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.As",FrictionAsEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 			}
 			break;
 		case 4:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
 			break;
 		case 5:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.water_layer",FrictionWaterLayerEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.water_layer",FrictionWaterLayerEnum);
 			break;
 		case 6:
-			iomodel->FetchDataToInput(elements,"md.friction.C",FrictionCEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.m",FrictionMEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum);
-			iomodel->FetchDataToInput(elements,"md.initialization.temperature",TemperatureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.C",FrictionCEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.m",FrictionMEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.initialization.temperature",TemperatureEnum);
 			break;
 		case 7:
 			iomodel->FindConstant(&FrictionCoupling,"md.friction.coupling");
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.p",FrictionPEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.q",FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficientcoulomb",FrictionCoefficientcoulombEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.p",FrictionPEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.q",FrictionQEnum);
 			if (FrictionCoupling==3){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",FrictionEffectivePressureEnum);}
 			else if(FrictionCoupling==4){
-				iomodel->FetchDataToInput(elements,"md.friction.effective_pressure",EffectivePressureEnum);
+				iomodel->FetchDataToInput(inputs2,elements,"md.friction.effective_pressure",EffectivePressureEnum);
 			}
 			break;
 		case 9:
-			iomodel->FetchDataToInput(elements,"md.friction.coefficient",FrictionCoefficientEnum);
-			iomodel->FetchDataToInput(elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
-			InputUpdateFromConstantx(elements,1.,FrictionPEnum);
-			InputUpdateFromConstantx(elements,1.,FrictionQEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.coefficient",FrictionCoefficientEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.friction.pressure_adjusted_temperature",FrictionPressureAdjustedTemperatureEnum);
+			InputUpdateFromConstantx(inputs2,elements,1.,FrictionPEnum);
+			InputUpdateFromConstantx(inputs2,elements,1.,FrictionQEnum);
 			break;
 		default:
@@ -275,12 +275,19 @@
 	int frictionlaw;
 	iomodel->FindConstant(&frictionlaw,"md.friction.law");
-	if(frictionlaw==4 || frictionlaw==6){
+	if(frictionlaw==6){
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
 	}
-	if(frictionlaw==3 || frictionlaw==1 || frictionlaw==7){
+	if(frictionlaw==4){
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
+	}
+	if(frictionlaw==1 || frictionlaw==3 || frictionlaw==7){
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.coupling",FrictionCouplingEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
 	}
 	if(frictionlaw==9){
 		parameters->AddObject(iomodel->CopyConstantObject("md.friction.gamma",FrictionGammaEnum));
+		parameters->AddObject(iomodel->CopyConstantObject("md.friction.effective_pressure_limit",FrictionEffectivePressureLimitEnum));
 		parameters->AddObject(new IntParam(FrictionCouplingEnum,0));
 	}
@@ -338,10 +345,10 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	IssmDouble  gravity             = element->FindParam(ConstantsGEnum);
-	IssmDouble  rho_water           = element->FindParam(MaterialsRhoSeawaterEnum);
-	IssmDouble  rho_ice             = element->FindParam(MaterialsRhoIceEnum);
-	IssmDouble  heatcapacity        = element->FindParam(MaterialsHeatcapacityEnum);
-	IssmDouble  mixed_layer_capacity= element->FindParam(MaterialsMixedLayerCapacityEnum);
-	IssmDouble  thermal_exchange_vel= element->FindParam(MaterialsThermalExchangeVelocityEnum);
+	IssmDouble gravity             = element->FindParam(ConstantsGEnum);
+	IssmDouble rho_water           = element->FindParam(MaterialsRhoSeawaterEnum);
+	IssmDouble rho_ice             = element->FindParam(MaterialsRhoIceEnum);
+	IssmDouble heatcapacity        = element->FindParam(MaterialsHeatcapacityEnum);
+	IssmDouble mixed_layer_capacity= element->FindParam(MaterialsMixedLayerCapacityEnum);
+	IssmDouble thermal_exchange_vel= element->FindParam(MaterialsThermalExchangeVelocityEnum);
 
 	/* Start  looping on the number of gaussian points: */
@@ -401,10 +408,10 @@
 	IssmDouble  thermalconductivity = element->FindParam(MaterialsThermalconductivityEnum);
 	IssmDouble  kappa = thermalconductivity/(rho_ice*heatcapacity);
-	Input* vx_input  = element->GetInput(VxEnum);     _assert_(vx_input);
-	Input* vy_input  = element->GetInput(VyEnum);     _assert_(vy_input);
-	Input* vz_input  = element->GetInput(VzEnum);     _assert_(vz_input);
-	Input* vxm_input = element->GetInput(VxMeshEnum); _assert_(vxm_input);
-	Input* vym_input = element->GetInput(VyMeshEnum); _assert_(vym_input);
-	Input* vzm_input = element->GetInput(VzMeshEnum); _assert_(vzm_input);
+	Input2* vx_input  = element->GetInput2(VxEnum);     _assert_(vx_input);
+	Input2* vy_input  = element->GetInput2(VyEnum);     _assert_(vy_input);
+	Input2* vz_input  = element->GetInput2(VzEnum);     _assert_(vz_input);
+	Input2* vxm_input = element->GetInput2(VxMeshEnum); _assert_(vxm_input);
+	Input2* vym_input = element->GetInput2(VyMeshEnum); _assert_(vym_input);
+	Input2* vzm_input = element->GetInput2(VzMeshEnum); _assert_(vzm_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -551,8 +558,8 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	Input* vx_input             = element->GetInput(VxEnum);                          _assert_(vx_input);
-	Input* vy_input             = element->GetInput(VyEnum);                          _assert_(vy_input);
-	Input* vz_input             = element->GetInput(VzEnum);                          _assert_(vz_input);
-	Input* geothermalflux_input = element->GetInput(BasalforcingsGeothermalfluxEnum); _assert_(geothermalflux_input);
+	Input2* vx_input             = element->GetInput2(VxEnum);                          _assert_(vx_input);
+	Input2* vy_input             = element->GetInput2(VyEnum);                          _assert_(vy_input);
+	Input2* vz_input             = element->GetInput2(VzEnum);                          _assert_(vz_input);
+	Input2* geothermalflux_input = element->GetInput2(BasalforcingsGeothermalfluxEnum); _assert_(geothermalflux_input);
 	IssmDouble  rho_ice             = element->FindParam(MaterialsRhoIceEnum);
 	IssmDouble  heatcapacity        = element->FindParam(MaterialsHeatcapacityEnum);
@@ -612,5 +619,5 @@
 	element->GetVerticesCoordinatesBase(&xyz_list_base);
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
-	Input*      pressure_input=element->GetInput(PressureEnum); _assert_(pressure_input);
+	Input2*      pressure_input=element->GetInput2(PressureEnum); _assert_(pressure_input);
 	IssmDouble  gravity             = element->FindParam(ConstantsGEnum);
 	IssmDouble  rho_water           = element->FindParam(MaterialsRhoSeawaterEnum);
@@ -674,9 +681,9 @@
 	element->FindParam(&dt,TimesteppingTimeStepEnum);
 	element->FindParam(&stabilization,ThermalStabilizationEnum);
-	Input* vx_input=element->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input=element->GetInput(VzEnum); _assert_(vz_input);
-	Input* temperature_input = NULL;
-	if(reCast<bool,IssmDouble>(dt)){temperature_input = element->GetInput(TemperatureEnum); _assert_(temperature_input);}
+	Input2* vx_input=element->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input=element->GetInput2(VzEnum); _assert_(vz_input);
+	Input2* temperature_input = NULL;
+	if(reCast<bool,IssmDouble>(dt)){temperature_input = element->GetInput2(TemperatureEnum); _assert_(temperature_input);}
 
 	/* Start  looping on the number of gaussian points: */
@@ -873,5 +880,5 @@
 	element->GetInputValue(&converged,ConvergedEnum);
 	if(converged){
-		element->AddInput(TemperatureEnum,values,element->GetElementType());
+		element->AddInput2(TemperatureEnum,values,element->GetElementType());
 
 		IssmDouble* n = xNew<IssmDouble>(numnodes);
@@ -894,26 +901,26 @@
 			case BuddJackaEnum:
 				for(i=0;i<numnodes;i++) B[i]=BuddJacka(values[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],element->GetElementType());
 				break;
 			case CuffeyEnum:
 				for(i=0;i<numnodes;i++) B[i]=Cuffey(values[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],element->GetElementType());
 				break;
 			case PatersonEnum:
 				for(i=0;i<numnodes;i++) B[i]=Paterson(values[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],element->GetElementType());
 				break;
 			case NyeH2OEnum:
 				for(i=0;i<numnodes;i++) B[i]=NyeH2O(values[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],element->GetElementType());
 				break; 
 			case NyeCO2Enum:
 				for(i=0;i<numnodes;i++) B[i]=NyeCO2(values[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],element->GetElementType());
 				break;
 			case ArrheniusEnum:{
 				element->GetVerticesCoordinates(&xyz_list);
 				for(i=0;i<numnodes;i++) B[i]=Arrhenius(values[i],surface[i]-xyz_list[i*3+2],n[i]);
-				element->AddInput(MaterialsRheologyBEnum,&B[0],element->GetElementType());
+				element->AddInput2(MaterialsRheologyBEnum,&B[0],element->GetElementType());
 				break;
 				}
@@ -924,5 +931,5 @@
 	}
 	else{
-		element->AddInput(TemperaturePicardEnum,values,element->GetElementType());
+		element->AddInput2(TemperaturePicardEnum,values,element->GetElementType());
 	}
 
Index: /issm/trunk/src/c/analyses/ThermalAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/ThermalAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/ThermalAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/analyses/UzawaPressureAnalysis.cpp
===================================================================
--- /issm/trunk/src/c/analyses/UzawaPressureAnalysis.cpp	(revision 24685)
+++ /issm/trunk/src/c/analyses/UzawaPressureAnalysis.cpp	(revision 24686)
@@ -27,5 +27,5 @@
 	return 1;
 }/*}}}*/
-void UzawaPressureAnalysis::UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
+void UzawaPressureAnalysis::UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type){/*{{{*/
 
 	/*Update elements: */
@@ -42,14 +42,14 @@
 		if(iomodel->my_elements[i]){
 			Element* element=(Element*)elements->GetObjectByOffset(counter);
-			element->Update(i,iomodel,analysis_counter,analysis_type,finiteelement);
+			element->Update(inputs2,i,iomodel,analysis_counter,analysis_type,finiteelement);
 			counter++;
 		}
 	}
 
-	iomodel->FetchDataToInput(elements,"md.initialization.vx",VxEnum,0.);
-	iomodel->FetchDataToInput(elements,"md.initialization.vy",VyEnum,0.);
-	if(iomodel->domaintype==Domain3DEnum) iomodel->FetchDataToInput(elements,"md.initialization.vz",VzEnum,0.);
-	iomodel->FetchDataToInput(elements,"md.initialization.pressure",PressureEnum,0.);
-	InputUpdateFromConstantx(elements,0.,SigmaNNEnum);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vx",VxEnum,0.);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vy",VyEnum,0.);
+	if(iomodel->domaintype==Domain3DEnum) iomodel->FetchDataToInput(inputs2,elements,"md.initialization.vz",VzEnum,0.);
+	iomodel->FetchDataToInput(inputs2,elements,"md.initialization.pressure",PressureEnum,0.);
+	InputUpdateFromConstantx(inputs2,elements,0.,SigmaNNEnum);
 }/*}}}*/
 void UzawaPressureAnalysis::UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum){/*{{{*/
@@ -124,14 +124,14 @@
 
 	/*Initialize Element matrix and vectors*/
-	ElementVector* pe    = element->NewElementVector();
-	IssmDouble*    basis = xNew<IssmDouble>(numnodes);
-	IssmDouble*    dvx   = xNew<IssmDouble>(dim);
-	IssmDouble*    dvy   = xNew<IssmDouble>(dim);
-	IssmDouble*    dvz   = xNew<IssmDouble>(dim);
-
-	Input* vx_input=element->GetInput(VxEnum);     _assert_(vx_input);
-	Input* vy_input=element->GetInput(VyEnum);     _assert_(vy_input);
-	Input* vz_input = NULL;
-	if(dim==3){vz_input=element->GetInput(VzEnum); _assert_(vz_input);}
+	ElementVector *pe    = element->NewElementVector();
+	IssmDouble    *basis = xNew<IssmDouble>(numnodes);
+	IssmDouble     dvx[3];
+	IssmDouble     dvy[3];
+	IssmDouble     dvz[3];
+
+	Input2* vx_input=element->GetInput2(VxEnum);     _assert_(vx_input);
+	Input2* vy_input=element->GetInput2(VyEnum);     _assert_(vy_input);
+	Input2* vz_input = NULL;
+	if(dim==3){vz_input=element->GetInput2(VzEnum); _assert_(vz_input);}
 
 	Gauss* gauss = element->NewGauss(5);
@@ -159,7 +159,4 @@
 	xDelete<IssmDouble>(xyz_list);
 	xDelete<IssmDouble>(basis);
-	xDelete<IssmDouble>(dvx);
-	xDelete<IssmDouble>(dvy);
-	xDelete<IssmDouble>(dvz);
 	return pe;
 }/*}}}*/
@@ -209,8 +206,8 @@
 	IssmDouble* valueslambda  = xNewZeroInit<IssmDouble>(numnodessigma);
 	IssmDouble* pressure      = xNew<IssmDouble>(numnodes);
-	Input* vx_input           = element->GetInput(VxEnum);      _assert_(vx_input);
-	Input* vy_input           = element->GetInput(VyEnum);      _assert_(vy_input);
-	Input* vz_input           = NULL;
-	if(dim==3){vz_input       = element->GetInput(VzEnum);      _assert_(vz_input);}
+	Input2* vx_input           = element->GetInput2(VxEnum);      _assert_(vx_input);
+	Input2* vy_input           = element->GetInput2(VyEnum);      _assert_(vy_input);
+	Input2* vz_input           = NULL;
+	if(dim==3){vz_input       = element->GetInput2(VzEnum);      _assert_(vz_input);}
 	element->GetInputListOnNodes(&pressure[0],PressureEnum);
 
@@ -219,9 +216,9 @@
 		values[i]  = pressure[i] + solution[doflist[i]];
 	}
-	element->AddInput(PressureEnum,values,element->GetElementType());
+	element->AddInput2(PressureEnum,values,element->GetElementType());
 
 	/*Now compute sigmann if on base*/
 	if(element->IsOnBase() && 0){ 
-		Input* sigmann_input      = element->GetInput(SigmaNNEnum); _assert_(sigmann_input);
+		Input2* sigmann_input      = element->GetInput2(SigmaNNEnum); _assert_(sigmann_input);
 		if(dim==3) _error_("not implemented yet");
 
@@ -234,5 +231,5 @@
 		IssmDouble  deltalambda[3] = {0.0};
 		IssmDouble* vertexonbase  = xNew<IssmDouble>(numnodessigma);
-		Input* vertexonbase_input = element->GetInput(MeshVertexonbaseEnum); _assert_(vertexonbase_input);
+		Input2* vertexonbase_input = element->GetInput2(MeshVertexonbaseEnum); _assert_(vertexonbase_input);
 		Gauss* gauss = element->NewGauss();
 
@@ -288,5 +285,5 @@
 		xDelete<IssmDouble>(basis);
 
-		element->AddInput(SigmaNNEnum,valueslambda,P2Enum);
+		element->AddInput2(SigmaNNEnum,valueslambda,P2Enum);
 	}
 
Index: /issm/trunk/src/c/analyses/UzawaPressureAnalysis.h
===================================================================
--- /issm/trunk/src/c/analyses/UzawaPressureAnalysis.h	(revision 24685)
+++ /issm/trunk/src/c/analyses/UzawaPressureAnalysis.h	(revision 24686)
@@ -17,5 +17,5 @@
 		void CreateNodes(Nodes* nodes,IoModel* iomodel,bool isamr=false);
 		int  DofsPerNode(int** doflist,int domaintype,int approximation);
-		void UpdateElements(Elements* elements,IoModel* iomodel,int analysis_counter,int analysis_type);
+		void UpdateElements(Elements* elements,Inputs2* inputs2,IoModel* iomodel,int analysis_counter,int analysis_type);
 		void UpdateParameters(Parameters* parameters,IoModel* iomodel,int solution_enum,int analysis_enum);
 
Index: /issm/trunk/src/c/bamg/BamgQuadtree.cpp
===================================================================
--- /issm/trunk/src/c/bamg/BamgQuadtree.cpp	(revision 24685)
+++ /issm/trunk/src/c/bamg/BamgQuadtree.cpp	(revision 24686)
@@ -2,7 +2,5 @@
 #include <string.h>
 #include <stdlib.h>
-
 #include "./bamgobjects.h"
-#include "../datastructures/datastructures.h"
 
 namespace bamg {
@@ -68,7 +66,4 @@
 		this->NbVertices    = 0;
 
-		/*Create container*/
-		this->boxcontainer=new DataSet();
-
 		/*Create Root, pointer toward the main box*/
 		this->root=NewBamgQuadtreeBox();
@@ -80,7 +75,4 @@
 		this->NbQuadtreeBox = 0;
 		this->NbVertices    = 0;
-
-		/*Create container*/
-		this->boxcontainer=new DataSet();
 
 		/*Create Root, pointer toward the main box*/
@@ -99,5 +91,10 @@
 	/*}}}*/
 	BamgQuadtree::~BamgQuadtree() {/*{{{*/
-		delete boxcontainer;
+
+		vector<BamgQuadtreeBox*>::reverse_iterator object;
+		for(object=boxcontainer.rbegin() ; object <boxcontainer.rend(); object++ ){
+			delete (*object);
+		}
+		boxcontainer.clear();
 		root=NULL;
 	}
@@ -514,5 +511,5 @@
 
 		/*Add root to the container*/
-		boxcontainer->AddObject(newbox);
+		boxcontainer.push_back(newbox);
 		this->NbQuadtreeBox++;
 
Index: /issm/trunk/src/c/bamg/BamgQuadtree.h
===================================================================
--- /issm/trunk/src/c/bamg/BamgQuadtree.h	(revision 24685)
+++ /issm/trunk/src/c/bamg/BamgQuadtree.h	(revision 24686)
@@ -3,6 +3,6 @@
 #define _BAMGQUADTREE_H
 
+#include <vector>
 #include "./include.h"
-#include "../datastructures/datastructures.h"
 class BamgVertex;
 
@@ -17,20 +17,13 @@
 			 * - up to 4 vertices
 			 * - 4 "sub" quadtree boxes*/
-			class BamgQuadtreeBox: public Object{ 
+			class BamgQuadtreeBox{ 
 				public:
 					int              nbitems;
 					BamgQuadtreeBox *box[4];
 					BamgVertex      *v[4];
-					/*Object functions*/
-					void    Echo()       {_error_("not implemented yet"); };
-					void    DeepEcho()   {_error_("not implemented yet"); };
-					int     Id()         {_error_("not implemented yet"); };
-					int     ObjectEnum() {_error_("not implemented yet"); };
-					Object *copy()       {_error_("not implemented yet"); };
-					void Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ _error_("not implemented yet!"); };
 			};
 
 			/*BamgQuadtree private Fields*/
-			DataSet* boxcontainer;
+			std::vector<BamgQuadtreeBox*> boxcontainer;
 
 		public:
Index: /issm/trunk/src/c/classes/AdaptiveMeshRefinement.cpp
===================================================================
--- /issm/trunk/src/c/classes/AdaptiveMeshRefinement.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/AdaptiveMeshRefinement.cpp	(revision 24686)
@@ -10,4 +10,32 @@
 
 #include "./AdaptiveMeshRefinement.h"
+
+/*Includes*/
+/*{{{*/
+/*Common includes*/
+#include <iostream>
+#include <fstream>
+#include <string>
+#include <climits>
+#include <cfloat>
+
+/*NeoPZ includes*/
+#include <pz_config.h>
+#include <pzreal.h>
+#include <pzvec.h>
+#include <pzeltype.h>
+
+#include <TPZRefPatternTools.h>
+#include <TPZRefPatternDataBase.h>
+#include <TPZRefPattern.h>
+
+#include <tpzchangeel.h>
+#include <TPZGeoElement.h>
+#include <pzreftriangle.h>
+#include <pzgeotriangle.h>
+#include <tpzgeoelrefpattern.h>
+#include <pzgraphmesh.h>
+#include <TPZVTKGeoMesh.h>
+/*}}}*/
 
 using namespace pzgeom;
@@ -86,7 +114,7 @@
 	if(this->fathermesh)    delete this->fathermesh;
 	if(this->previousmesh)  delete this->previousmesh;
-	if(this->x)					delete this->x;
-	if(this->y)					delete this->y;
-	if(this->elementslist)	delete this->elementslist;
+	if(this->x)					xDelete<IssmDouble>(this->x);
+	if(this->y)					xDelete<IssmDouble>(this->y);
+	if(this->elementslist)	xDelete<int>(this->elementslist);
 	this->refinement_type				= -1;
 	this->level_max						= -1;
@@ -612,7 +640,7 @@
 
 	/*Generate the elements*/
-	long index;
+	int64_t index;
    const int mat = this->GetElemMaterialID();
-   TPZManVector<long> elem(this->GetNumberOfNodes(),0);
+   TPZManVector<int64_t> elem(this->GetNumberOfNodes(),0);
 	this->index2sid.clear(); this->index2sid.resize(this->numberofelements);
    this->sid2index.clear();
@@ -646,5 +674,5 @@
    int mat     = this->GetElemMaterialID();;
    int reftype = 1;
-   long index; 
+   int64_t index; 
 
 	//nodes
@@ -662,5 +690,5 @@
 		}
 
-		TPZManVector<long> elem(3,0);
+		TPZManVector<int64_t> elem(3,0);
       for(int j=0;j<3;j++) elem[j] = geoel->NodeIndex(j);
 
@@ -732,6 +760,6 @@
 	/*Basic verification*/
 	if(!pdata) _error_("Impossible to continue: pdata is NULL!\n");
-	if(pdata[0]<=0) _error_("Impossible to continue: nvertices <=0!\n");
-	if(pdata[1]<=0) _error_("Impossible to continue: nelements <=0!\n");
+	if(**pdata<=0) _error_("Impossible to continue: nvertices <=0!\n");
+	if(*(*pdata+1)<=0) _error_("Impossible to continue: nelements <=0!\n");
 	if(!pxy) _error_("Impossible to continue: pxy is NULL!\n");
 	if(!pelements) _error_("Impossible to continue: pelements is NULL!\n");
Index: /issm/trunk/src/c/classes/AdaptiveMeshRefinement.h
===================================================================
--- /issm/trunk/src/c/classes/AdaptiveMeshRefinement.h	(revision 24685)
+++ /issm/trunk/src/c/classes/AdaptiveMeshRefinement.h	(revision 24686)
@@ -4,32 +4,8 @@
 /*Includes*/
 /*{{{*/
-/*Common includes*/
-#include <iostream>
-#include <fstream>
-#include <string>
-#include <climits>
-#include <cfloat>
-
 /*NeoPZ includes*/
-#include <pz_config.h>
-#include <pzreal.h>
 #include <pzgmesh.h>
-#include <pzvec.h>
-#include <pzeltype.h>
-
-#include <TPZRefPatternTools.h>
-#include <TPZRefPatternDataBase.h>
-#include <TPZRefPattern.h>
-
-#include <tpzchangeel.h>
-#include <TPZGeoElement.h>
-#include <pzreftriangle.h>
-#include <pzgeotriangle.h>
-#include <tpzgeoelrefpattern.h>
-#include <pzgraphmesh.h>
-#include <TPZVTKGeoMesh.h>
-
+/*ISSM includes*/
 #include "../shared/shared.h"
-
 /*}}}*/
 
Index: /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Cfdragcoeffabsgrad.cpp	(revision 24686)
@@ -20,6 +20,6 @@
 #include "../modules/SurfaceAreax/SurfaceAreax.h"
 #include "../classes/Params/Parameters.h"
-#include "../classes/Inputs/Input.h"
 #include "../classes/gauss/Gauss.h"
+#include "./Inputs2/DatasetInput2.h"
 /*}}}*/
 
@@ -151,11 +151,6 @@
 
 	/*Get input if it already exists*/
-	Input*  tempinput = basalelement->GetInput(definitionenum);
-	/*Cast it to a Datasetinput*/
-	if(tempinput->ObjectEnum()!=DatasetInputEnum) _error_("don't know what to do! confused!");
-	DatasetInput* datasetinput = (DatasetInput*)tempinput;
-
-	/*Get the drag from the model*/
-	Input* drag_input=basalelement->GetInput(FrictionCoefficientEnum);	_assert_(drag_input);
+	DatasetInput2 *datasetinput = basalelement->GetDatasetInput2(definitionenum);  _assert_(datasetinput);
+	Input2        *drag_input   = basalelement->GetInput2(FrictionCoefficientEnum); _assert_(drag_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/classes/Cfsurfacelogvel.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfsurfacelogvel.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Cfsurfacelogvel.cpp	(revision 24686)
@@ -20,6 +20,6 @@
 #include "../modules/SurfaceAreax/SurfaceAreax.h"
 #include "../classes/Params/Parameters.h"
-#include "../classes/Inputs/Input.h"
 #include "../classes/gauss/Gauss.h"
+#include "./Inputs2/DatasetInput2.h"
 /*}}}*/
 
@@ -162,16 +162,12 @@
 
 	/*Get model values*/
-	Input* vx_input     =topelement->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vy_input	  =NULL;
+	Input2 *vx_input = topelement->GetInput2(VxEnum); _assert_(vx_input);
+	Input2 *vy_input = NULL;
 	if(numcomponents==2){
-	      vy_input    =topelement->GetInput(VyEnum);							              _assert_(vy_input);
+	      vy_input = topelement->GetInput2(VyEnum); _assert_(vy_input);
 	}
 
 	/*Retrieve all inputs we will be needing: */
-	DatasetInput*    datasetinput = NULL;
-	Input*  tempinput = topelement->GetInput(definitionenum);
-	/*Cast it to a Datasetinput*/
-	if(tempinput->ObjectEnum()!=DatasetInputEnum) _error_("don't know what to do");
-	datasetinput = (DatasetInput*)tempinput;
+	DatasetInput2 *datasetinput = topelement->GetDatasetInput2(definitionenum); _assert_(datasetinput);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/classes/Cfsurfacesquare.cpp
===================================================================
--- /issm/trunk/src/c/classes/Cfsurfacesquare.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Cfsurfacesquare.cpp	(revision 24686)
@@ -20,6 +20,6 @@
 #include "../modules/SurfaceAreax/SurfaceAreax.h"
 #include "../classes/Params/Parameters.h"
-#include "../classes/Inputs/Input.h"
 #include "../classes/gauss/Gauss.h"
+#include "./Inputs2/DatasetInput2.h"
 /*}}}*/
 
@@ -167,17 +167,6 @@
 
 	/*Retrieve all inputs we will be needing: */
-
-	DatasetInput*    datasetinput = NULL;
-
-	/*Get input if it already exists*/
-	Input*  tempinput = topelement->GetInput(definitionenum);
-
-	/*Cast it to a Datasetinput*/
-	if(tempinput->ObjectEnum()!=DatasetInputEnum) _error_("don't know what to do");
-	datasetinput = (DatasetInput*)tempinput;
-
-	Input* model_input=topelement->GetInput(model_enum);												_assert_(model_input);
-	//Input* observation_input=topelement->GetInput(observation_enum);								_assert_(observation_input);
-	//Input* weights_input     =topelement->GetInput(weights_enum);									_assert_(weights_input);
+	DatasetInput2 *datasetinput = topelement->GetDatasetInput2(definitionenum); _assert_(datasetinput);
+	Input2        *model_input  = topelement->GetInput2(model_enum);            _assert_(model_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/classes/Dakota/IssmParallelDirectApplicInterface.cpp
===================================================================
--- /issm/trunk/src/c/classes/Dakota/IssmParallelDirectApplicInterface.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Dakota/IssmParallelDirectApplicInterface.cpp	(revision 24686)
@@ -64,5 +64,6 @@
 
 		#ifdef MPI_DEBUG
-		Cout << "eval server id" << evalServerId << " invoking " << ac_name << " within SIM::IssmParallelDirectApplicInterface." << std::endl;
+		_printf0_("eval server id" << evalServerId << " invoking " << ac_name << " within SIM::IssmParallelDirectApplicInterface." << std::endl);
+		_printf0_("evalServerId " << evalServerId << "evaluation_id " << currEvalId <<  "\n");
 		#endif // MPI_DEBUG
 
@@ -101,4 +102,7 @@
 		femmodel->parameters->FindParam(&solution_type,SolutionTypeEnum);
 		femmodel->parameters->FindParam(&control_analysis,InversionIscontrolEnum);
+
+		/*include currEvalId in parameters:*/
+		femmodel->parameters->SetParam(currEvalId,QmuCurrEvalIdEnum);
 
 		/*Modify core inputs in objects contained in femmodel, to reflect the dakota variables inputs: */
Index: /issm/trunk/src/c/classes/Elements/Element.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Element.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Element.cpp	(revision 24686)
@@ -15,10 +15,17 @@
 #include "../../shared/shared.h"
 #include "../../modules/SurfaceMassBalancex/SurfaceMassBalancex.h"
-/*}}}*/
+#include "../Inputs2/BoolInput2.h"
+#include "../Inputs2/TransientInput2.h"
+#include "../Inputs2/ElementInput2.h"
+#include "../Inputs2/PentaInput2.h"
+#include "../Inputs2/DatasetInput2.h"
+#include "../Inputs2/ArrayInput2.h"
+/*}}}*/
+#define MAXVERTICES 6 /*Maximum number of vertices per element, currently Penta, to avoid dynamic mem allocation*/
 
 #ifdef _HAVE_SEMIC_
 /* SEMIC prototype {{{*/
-extern "C" void run_semic_(double *sf_in, double *rf_in, double *swd_in, double *lwd_in, double *wind_in, double *sp_in, double *rhoa_in,
-			double *qq_in, double *tt_in, double *tsurf_out, double *smb_out, double *saccu_out, double *smelt_out);
+extern "C" void run_semic_(IssmDouble *sf_in, IssmDouble *rf_in, IssmDouble *swd_in, IssmDouble *lwd_in, IssmDouble *wind_in, IssmDouble *sp_in, IssmDouble *rhoa_in,
+			IssmDouble *qq_in, IssmDouble *tt_in, IssmDouble *tsurf_out, IssmDouble *smb_out, IssmDouble *saccu_out, IssmDouble *smelt_out);
 #endif
 // _HAVE_SEMIC_
@@ -28,5 +35,6 @@
 	this->id  = -1;
 	this->sid = -1;
-	this->inputs     = NULL;
+	this->lid = -1;
+	this->inputs2    = NULL;
 	this->nodes      = NULL;
 	this->vertices   = NULL;
@@ -37,14 +45,17 @@
 Element::~Element(){/*{{{*/
 	xDelete<int>(element_type_list);
-	delete inputs;
 }
 /*}}}*/
 
 /*Other*/
-void       Element::AddInput(Input* input_in){/*{{{*/
-
-	/*Call inputs method*/
-	_assert_(this->inputs);
-	this->inputs->AddInput(input_in);
+bool       Element::AnyFSet(){/*{{{*/
+
+	/*Fetch number of nodes and dof for this finite element*/
+	int numnodes = this->GetNumberOfNodes();
+
+	for(int i=0;i<numnodes;i++){
+		if(nodes[i]->fsize) return true;
+	}
+	return false;
 }/*}}}*/
 void       Element::ComputeLambdaS(){/*{{{*/
@@ -60,8 +71,8 @@
 	this->GetVerticesCoordinates(&xyz_list);
 	parameters->FindParam(&dim,DomainDimensionEnum);
-	Input* vx_input=this->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=this->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input=NULL;
-	if(dim==3){vz_input=this->GetInput(VzEnum); _assert_(vz_input);}
+	Input2* vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input=NULL;
+	if(dim==3){vz_input=this->GetInput2(VzEnum); _assert_(vz_input);}
 
 	/*Allocate arrays*/
@@ -122,5 +133,5 @@
 
 	/*Add Stress tensor components into inputs*/
-	this->AddInput(LambdaSEnum,lambdas,P1Enum);
+	this->AddInput2(LambdaSEnum,lambdas,P1Enum);
 
 	/*Clean up and return*/
@@ -148,14 +159,14 @@
 
 	/* Retrieve inputs */
-	Input* eps_xx_input=this->GetInput(StrainRatexxEnum); _assert_(eps_xx_input);
-	Input* eps_yy_input=this->GetInput(StrainRateyyEnum); _assert_(eps_yy_input);
-	Input* eps_xy_input=this->GetInput(StrainRatexyEnum); _assert_(eps_xy_input);
-	Input* eps_xz_input=NULL;
-	Input* eps_yz_input=NULL;
-	Input* eps_zz_input=NULL;
+	Input2* eps_xx_input=this->GetInput2(StrainRatexxEnum); _assert_(eps_xx_input);
+	Input2* eps_yy_input=this->GetInput2(StrainRateyyEnum); _assert_(eps_yy_input);
+	Input2* eps_xy_input=this->GetInput2(StrainRatexyEnum); _assert_(eps_xy_input);
+	Input2* eps_xz_input=NULL;
+	Input2* eps_yz_input=NULL;
+	Input2* eps_zz_input=NULL;
 	if(dim==3){
-		eps_xz_input=this->GetInput(StrainRatexzEnum); _assert_(eps_xz_input);
-		eps_yz_input=this->GetInput(StrainRateyzEnum); _assert_(eps_yz_input);
-		eps_zz_input=this->GetInput(StrainRatezzEnum); _assert_(eps_zz_input);
+		eps_xz_input=this->GetInput2(StrainRatexzEnum); _assert_(eps_xz_input);
+		eps_yz_input=this->GetInput2(StrainRateyzEnum); _assert_(eps_yz_input);
+		eps_zz_input=this->GetInput2(StrainRatezzEnum); _assert_(eps_zz_input);
 	}
 
@@ -165,16 +176,16 @@
 
 	/* Retrieve domain-dependent inputs */
-	Input* n_input=this->GetInput(MaterialsRheologyNEnum); _assert_(n_input);
-	Input* damage_input = NULL;
-	Input* B_input = NULL;
+	Input2* n_input=this->GetInput2(MaterialsRheologyNEnum); _assert_(n_input);
+	Input2* damage_input = NULL;
+	Input2* B_input = NULL;
 	int domaintype;
 	parameters->FindParam(&domaintype,DomainTypeEnum);
 	if(domaintype==Domain2DhorizontalEnum){
-		damage_input = this->GetInput(DamageDbarEnum);  _assert_(damage_input);
-		B_input=this->GetInput(MaterialsRheologyBbarEnum); _assert_(B_input);
+		damage_input = this->GetInput2(DamageDbarOldEnum);  _assert_(damage_input);
+		B_input=this->GetInput2(MaterialsRheologyBbarEnum); _assert_(B_input);
 	}
 	else{
-		damage_input = this->GetInput(DamageDEnum);   _assert_(damage_input);
-		B_input=this->GetInput(MaterialsRheologyBEnum); _assert_(B_input);
+		damage_input = this->GetInput2(DamageDOldEnum);   _assert_(damage_input);
+		B_input=this->GetInput2(MaterialsRheologyBEnum); _assert_(B_input);
 	}
 
@@ -217,10 +228,10 @@
 
 	/* Add new damage input to DamageEnum and NewDamageEnum */
-	this->AddInput(NewDamageEnum,newD,this->GetElementType());
+	this->AddInput2(NewDamageEnum,newD,P1DGEnum);
 	if(domaintype==Domain2DhorizontalEnum){
-		this->AddInput(DamageDbarEnum,newD,this->GetElementType());
+		this->AddInput2(DamageDbarEnum,newD,this->GetElementType());
 	}
 	else{
-		this->AddInput(DamageDEnum,newD,this->GetElementType());
+		this->AddInput2(DamageDEnum,newD,this->GetElementType());
 	}
 
@@ -240,8 +251,8 @@
 	this->GetVerticesCoordinates(&xyz_list);
 	parameters->FindParam(&dim,DomainDimensionEnum);
-	Input* vx_input=this->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input=this->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input=NULL;
-	if(dim==3){vz_input=this->GetInput(VzEnum); _assert_(vz_input);}
+	Input2* vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input=NULL;
+	if(dim==3){vz_input=this->GetInput2(VzEnum); _assert_(vz_input);}
 
 	/*Allocate arrays*/
@@ -289,11 +300,11 @@
 
 	/*Add Stress tensor components into inputs*/
-	this->AddInput(StrainRatexxEnum,eps_xx,P1Enum);
-	this->AddInput(StrainRatexyEnum,eps_xy,P1Enum);
-	this->AddInput(StrainRatexzEnum,eps_xz,P1Enum);
-	this->AddInput(StrainRateyyEnum,eps_yy,P1Enum);
-	this->AddInput(StrainRateyzEnum,eps_yz,P1Enum);
-	this->AddInput(StrainRatezzEnum,eps_zz,P1Enum);
-	this->AddInput(StrainRateeffectiveEnum,eps_ef,P1Enum);
+	this->AddInput2(StrainRatexxEnum,eps_xx,P1Enum);
+	this->AddInput2(StrainRatexyEnum,eps_xy,P1Enum);
+	this->AddInput2(StrainRatexzEnum,eps_xz,P1Enum);
+	this->AddInput2(StrainRateyyEnum,eps_yy,P1Enum);
+	this->AddInput2(StrainRateyzEnum,eps_yz,P1Enum);
+	this->AddInput2(StrainRatezzEnum,eps_zz,P1Enum);
+	this->AddInput2(StrainRateeffectiveEnum,eps_ef,P1Enum);
 
 	/*Clean up and return*/
@@ -389,4 +400,5 @@
 	_printf_("   id : "<<this->id <<"\n");
 	_printf_("   sid: "<<this->sid<<"\n");
+	_printf_("   lid: "<<this->lid<<"\n");
 	if(vertices){
 		const int NUM_VERTICES = this->GetNumberOfVertices();
@@ -409,14 +421,8 @@
 
 	_printf_("   inputs\n");
-	if (inputs) inputs->DeepEcho();
-	else _printf_("inputs=NULL\n");
+	if(inputs2) inputs2->DeepEcho();
+	else _printf_("inputs2=NULL\n");
 
 	return;
-}
-/*}}}*/
-void       Element::DeleteInput(int input_enum){/*{{{*/
-
-	inputs->DeleteInput(input_enum);
-
 }
 /*}}}*/
@@ -429,8 +435,9 @@
 	if(!IsOnBase()) return;
 
-	const int NUM_VERTICES 					= this->GetNumberOfVertices();
+	const int NUM_VERTICES = this->GetNumberOfVertices();
 	const int NUM_VERTICES_MONTHS_PER_YEAR	= NUM_VERTICES * 12;
 
 	int        i;
+	int*        vertexlids=xNew<int>(NUM_VERTICES);
 	IssmDouble* monthlytemperatures=xNew<IssmDouble>(NUM_VERTICES_MONTHS_PER_YEAR);
 	IssmDouble* monthlyprec=xNew<IssmDouble>(NUM_VERTICES_MONTHS_PER_YEAR);
@@ -439,18 +446,18 @@
 	IssmDouble* PrecipitationsPresentday=xNew<IssmDouble>(NUM_VERTICES_MONTHS_PER_YEAR);
 	IssmDouble* tmp=xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble Delta18oPresent,Delta18oLgm,Delta18oTime;
-	IssmDouble Delta18oSurfacePresent,Delta18oSurfaceLgm,Delta18oSurfaceTime;
-	IssmDouble time,yts,finaltime,time_yr;
 
 	/*Recover parameters*/
+	IssmDouble time,yts,finaltime;
 	this->parameters->FindParam(&time,TimeEnum);
 	this->parameters->FindParam(&yts,ConstantsYtsEnum);
 	this->parameters->FindParam(&finaltime,TimesteppingFinalTimeEnum);
-	time_yr=floor(time/yts)*yts;
+	this->GetVerticesLidList(vertexlids);
+	IssmDouble time_yr=floor(time/yts)*yts;
 
 	/*Recover present day temperature and precipitation*/
-	Input* input=this->inputs->GetInput(SmbTemperaturesPresentdayEnum);    _assert_(input);
-	Input* input2=this->inputs->GetInput(SmbTemperaturesLgmEnum);          _assert_(input2);
-	Input* input3=this->inputs->GetInput(SmbPrecipitationsPresentdayEnum); _assert_(input3);
+	DatasetInput2* dinput1=this->GetDatasetInput2(SmbTemperaturesPresentdayEnum);   _assert_(dinput1);
+	DatasetInput2* dinput2=this->GetDatasetInput2(SmbTemperaturesLgmEnum);          _assert_(dinput2);
+	DatasetInput2* dinput3=this->GetDatasetInput2(SmbPrecipitationsPresentdayEnum); _assert_(dinput3);
+
 	/*loop over vertices: */
 	Gauss* gauss=this->NewGauss();
@@ -458,7 +465,7 @@
 		for(int iv=0;iv<NUM_VERTICES;iv++){
 			gauss->GaussVertex(iv);
-			input->GetInputValue(&TemperaturesPresentday[iv*12+month],gauss,month/12.*yts);
-			input2->GetInputValue(&TemperaturesLgm[iv*12+month],gauss,month/12.*yts);
-			input3->GetInputValue(&PrecipitationsPresentday[iv*12+month],gauss,month/12.*yts);
+			dinput1->GetInputValue(&TemperaturesPresentday[iv*12+month],gauss,month);
+			dinput2->GetInputValue(&TemperaturesLgm[iv*12+month],gauss,month);
+			dinput3->GetInputValue(&PrecipitationsPresentday[iv*12+month],gauss,month);
 
 			PrecipitationsPresentday[iv*12+month]=PrecipitationsPresentday[iv*12+month]*yts;
@@ -467,4 +474,6 @@
 
 	/*Recover delta18o and Delta18oSurface at present day, lgm and at time t*/
+	IssmDouble Delta18oPresent,Delta18oLgm,Delta18oTime;
+	IssmDouble Delta18oSurfacePresent,Delta18oSurfaceLgm,Delta18oSurfaceTime;
 	this->parameters->FindParam(&Delta18oPresent,SmbDelta18oEnum,finaltime);
 	this->parameters->FindParam(&Delta18oLgm,SmbDelta18oEnum,(finaltime-(21000*yts)));
@@ -484,27 +493,18 @@
 
 	/*Update inputs*/
-	TransientInput* NewTemperatureInput = new TransientInput(SmbMonthlytemperaturesEnum);
-	TransientInput* NewPrecipitationInput = new TransientInput(SmbPrecipitationEnum);
 	for (int imonth=0;imonth<12;imonth++) {
 		for(i=0;i<NUM_VERTICES;i++) tmp[i]=monthlytemperatures[i*12+imonth];
 		switch(this->ObjectEnum()){
-			case TriaEnum:  NewTemperatureInput->AddTimeInput(new TriaInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case PentaEnum: NewTemperatureInput->AddTimeInput(new PentaInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case TetraEnum: NewTemperatureInput->AddTimeInput(new TetraInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
+			case TriaEnum: this->inputs2->SetTriaDatasetInput(SmbMonthlytemperaturesEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
+			case PentaEnum: this->inputs2->SetPentaDatasetInput(SmbMonthlytemperaturesEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
 			default: _error_("Not implemented yet");
 		}
 		for(i=0;i<NUM_VERTICES;i++) tmp[i]=monthlyprec[i*12+imonth]/yts;
 		switch(this->ObjectEnum()){
-			case TriaEnum:  NewPrecipitationInput->AddTimeInput(new TriaInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case PentaEnum: NewPrecipitationInput->AddTimeInput(new PentaInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case TetraEnum: NewPrecipitationInput->AddTimeInput(new TetraInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
+			case TriaEnum: this->inputs2->SetTriaDatasetInput(SmbPrecipitationEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
+			case PentaEnum: this->inputs2->SetPentaDatasetInput(SmbPrecipitationEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
 			default: _error_("Not implemented yet");
 		}
 	}
-	NewTemperatureInput->Configure(this->parameters);
-	NewPrecipitationInput->Configure(this->parameters);
-
-	this->inputs->AddInput(NewTemperatureInput);
-	this->inputs->AddInput(NewPrecipitationInput);
 
 	switch(this->ObjectEnum()){
@@ -512,6 +512,6 @@
 		case PentaEnum:
 		case TetraEnum:
-         this->InputExtrude(SmbMonthlytemperaturesEnum,-1);
-         this->InputExtrude(SmbPrecipitationEnum,-1);
+         this->DatasetInputExtrude(SmbMonthlytemperaturesEnum,-1);
+         this->DatasetInputExtrude(SmbPrecipitationEnum,-1);
          break;
 		default: _error_("Not implemented yet");
@@ -526,7 +526,7 @@
 	xDelete<IssmDouble>(PrecipitationsPresentday);
 	xDelete<IssmDouble>(tmp);
-
-}
-/*}}}*/
+	xDelete<int>(vertexlids);
+
+} /*}}}*/
 void       Element::Delta18opdParameterization(void){/*{{{*/
 	/*Are we on the base? If not, return*/
@@ -536,5 +536,6 @@
 	const int NUM_VERTICES_MONTHS_PER_YEAR	= NUM_VERTICES * 12;
 
-	int        	i;
+	int        	i,offset;
+	int*        vertexlids=xNew<int>(NUM_VERTICES);
 	IssmDouble* monthlytemperatures=xNew<IssmDouble>(NUM_VERTICES_MONTHS_PER_YEAR);
 	IssmDouble* monthlyprec=xNew<IssmDouble>(NUM_VERTICES_MONTHS_PER_YEAR);
@@ -545,42 +546,44 @@
 	IssmDouble* tmp=xNew<IssmDouble>(NUM_VERTICES);
 	IssmDouble Delta18oTime;
-	IssmDouble dpermil,f;
-	IssmDouble time,yts,time_yr,month,time_clim,time_climt,time_climp,del_clim;
-	bool isTemperatureScaled=true;
-	bool isPrecipScaled=true;
+	IssmDouble f;
+	IssmDouble time,yts,time_yr,month,time_climt,time_climp,del_clim;
 	this->parameters->FindParam(&time,TimeEnum);
 	this->parameters->FindParam(&yts,ConstantsYtsEnum);
 	this->parameters->FindParam(&f,SmbFEnum);
+	this->GetVerticesLidList(vertexlids);
 	time_yr=floor(time/yts)*yts;
-	time_clim=ceil(time/yts + 1e-10)*yts;
 	time_climt=ceil(time/yts + 1e-10)*yts;
 	time_climp=ceil(time/yts + 1e-10)*yts;
 
 	/*Get some pdd parameters*/
-	dpermil=this->FindParam(SmbDpermilEnum);
-
+	bool isTemperatureScaled,isPrecipScaled;
+	IssmDouble dpermil=this->FindParam(SmbDpermilEnum);
 	this->parameters->FindParam(&isTemperatureScaled,SmbIstemperaturescaledEnum);
 	this->parameters->FindParam(&isPrecipScaled,SmbIsprecipscaledEnum);
 
 	/*Recover present day temperature and precipitation*/
-	Input*     input=this->inputs->GetInput(SmbTemperaturesPresentdayEnum);    _assert_(input);
-	Input*     input2=this->inputs->GetInput(SmbPrecipitationsPresentdayEnum); _assert_(input2);
-	Input*     input3=NULL;
-	Input*     input4=NULL;
-	int offset;
-
-	offset=dynamic_cast<TransientInput*>(input)->GetTimeInputOffset(time_clim);
-	time_clim=dynamic_cast<TransientInput*>(input)->GetTimeByOffset(offset-11-fmod(offset-11,12.));
-
+	DatasetInput2 *dinput3 = NULL;
+	DatasetInput2 *dinput4 = NULL;
+	int            offset_t,offset_p,N;
 	if(!isTemperatureScaled){
-		input3=this->inputs->GetInput(SmbTemperaturesReconstructedEnum);           _assert_(input3);
-		offset=dynamic_cast<TransientInput*>(input3)->GetTimeInputOffset(time_climt);
-		time_climt=dynamic_cast<TransientInput*>(input3)->GetTimeByOffset(offset-11-fmod(offset-11,12.));
+		IssmDouble* time_temp_scaled = NULL;
+		parameters->FindParam(&time_temp_scaled,&N,SmbTemperaturesReconstructedYearsEnum);
+		if(!binary_search(&offset_t,time_climt,time_temp_scaled,N)) _error_("time not sorted?");
+		if(offset_t<0) offset_t=0;
+		xDelete<IssmDouble>(time_temp_scaled);
+		dinput3=this->GetDatasetInput2(SmbTemperaturesReconstructedEnum); _assert_(dinput3);
 	}
 	if(!isPrecipScaled){
-		input4=this->inputs->GetInput(SmbPrecipitationsReconstructedEnum);         _assert_(input4);
-		offset=dynamic_cast<TransientInput*>(input4)->GetTimeInputOffset(time_climp);
-		time_climp=dynamic_cast<TransientInput*>(input4)->GetTimeByOffset(offset-11-fmod(offset-11,12.));
-	}
+		IssmDouble* time_precip_scaled = NULL;
+		parameters->FindParam(&time_precip_scaled,&N,SmbPrecipitationsReconstructedYearsEnum);
+		if(!binary_search(&offset_p,time_climt,time_precip_scaled,N)) _error_("time not sorted?");
+		if(offset_p<0) offset_p=0;
+		xDelete<IssmDouble>(time_precip_scaled);
+		dinput4=this->GetDatasetInput2(SmbPrecipitationsReconstructedEnum); _assert_(dinput4);
+	}
+
+	/*Get present day temp and precip (monthly)*/
+	DatasetInput2 *dinput1 = this->GetDatasetInput2(SmbTemperaturesPresentdayEnum);   _assert_(dinput1);
+	DatasetInput2 *dinput2 = this->GetDatasetInput2(SmbPrecipitationsPresentdayEnum); _assert_(dinput2);
 
 	/*loop over vertices: */
@@ -589,16 +592,15 @@
 		for(int iv=0;iv<NUM_VERTICES;iv++) {
 			gauss->GaussVertex(iv);
-			input->GetInputValue(&TemperaturesPresentday[iv*12+month],gauss,time_clim+month/12.*yts);
-			input2->GetInputValue(&PrecipitationsPresentday[iv*12+month],gauss,time_clim+month/12.*yts);
+			dinput1->GetInputValue(&TemperaturesPresentday[iv*12+month],gauss,month);
+			dinput2->GetInputValue(&PrecipitationsPresentday[iv*12+month],gauss,month);
 			PrecipitationsPresentday[iv*12+month]=PrecipitationsPresentday[iv*12+month]*yts;
 
 			if(!isTemperatureScaled){
-				input3->GetInputValue(&TemperaturesReconstructed[iv*12+month],gauss,time_climt+month/12.*yts);
+				dinput3->GetInputValue(&TemperaturesReconstructed[iv*12+month],gauss,offset_t*12+month);
 			}
 			if(!isPrecipScaled){
-				input4->GetInputValue(&PrecipitationsReconstructed[iv*12+month],gauss,time_climp+month/12.*yts);
+				dinput4->GetInputValue(&PrecipitationsReconstructed[iv*12+month],gauss,offset_p*12+month);
 				PrecipitationsReconstructed[iv*12+month]=PrecipitationsReconstructed[iv*12+month]*yts;
 			}
-
 		}
 	}
@@ -616,27 +618,18 @@
 
 	/*Update inputs*/
-	TransientInput* NewTemperatureInput = new TransientInput(SmbMonthlytemperaturesEnum);
-	TransientInput* NewPrecipitationInput = new TransientInput(SmbPrecipitationEnum);
 	for (int imonth=0;imonth<12;imonth++) {
 		for(i=0;i<NUM_VERTICES;i++) tmp[i]=monthlytemperatures[i*12+imonth];
 		switch(this->ObjectEnum()){
-			case TriaEnum:  NewTemperatureInput->AddTimeInput(new TriaInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case PentaEnum: NewTemperatureInput->AddTimeInput(new PentaInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case TetraEnum: NewTemperatureInput->AddTimeInput(new TetraInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
+			case TriaEnum:  this->inputs2->SetTriaDatasetInput(SmbMonthlytemperaturesEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
+			case PentaEnum: this->inputs2->SetPentaDatasetInput(SmbMonthlytemperaturesEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
 			default: _error_("Not implemented yet");
 		}
 		for(i=0;i<NUM_VERTICES;i++) tmp[i]=monthlyprec[i*12+imonth]/yts;
 		switch(this->ObjectEnum()){
-			case TriaEnum:  NewPrecipitationInput->AddTimeInput(new TriaInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case PentaEnum: NewPrecipitationInput->AddTimeInput(new PentaInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case TetraEnum: NewPrecipitationInput->AddTimeInput(new TetraInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
+			case TriaEnum:  this->inputs2->SetTriaDatasetInput(SmbPrecipitationEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
+			case PentaEnum: this->inputs2->SetPentaDatasetInput(SmbPrecipitationEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
 			default: _error_("Not implemented yet");
 		}
 	}
-	NewTemperatureInput->Configure(this->parameters);
-	NewPrecipitationInput->Configure(this->parameters);
-
-	this->inputs->AddInput(NewTemperatureInput);
-	this->inputs->AddInput(NewPrecipitationInput);
 
 	switch(this->ObjectEnum()){
@@ -644,6 +637,6 @@
 		case PentaEnum:
 		case TetraEnum:
-         this->InputExtrude(SmbMonthlytemperaturesEnum,-1);
-         this->InputExtrude(SmbPrecipitationEnum,-1);
+         this->DatasetInputExtrude(SmbMonthlytemperaturesEnum,-1);
+         this->DatasetInputExtrude(SmbPrecipitationEnum,-1);
          break;
 		default: _error_("Not implemented yet");
@@ -659,7 +652,7 @@
 	xDelete<IssmDouble>(PrecipitationsReconstructed);
 	xDelete<IssmDouble>(tmp);
-
-}
-/*}}}*/
+	xDelete<int>(vertexlids);
+
+} /*}}}*/
 void       Element::SmbGradCompParameterization(void){/*{{{*/
 
@@ -673,10 +666,10 @@
 	IssmDouble accugrad, runoffgrad; //gradients from reference altitude
 	IssmDouble rho_water, rho_ice;
-	IssmDouble time,yts;
-
-	IssmDouble*		smb		= xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble*		surf	= xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble*		accu	= xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble*		runoff 	= xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble time;
+
+	IssmDouble*		smb	 = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble*		surf	 = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble*		accu	 = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble*		runoff = xNew<IssmDouble>(NUM_VERTICES);
 
 	/*Get material parameters :*/
@@ -686,5 +679,4 @@
 	/*Recover parameters*/
 	parameters->FindParam(&time,TimeEnum);
-	parameters->FindParam(&yts,ConstantsYtsEnum);
 	parameters->FindParam(&accualti,SmbAccualtiEnum);
 	parameters->FindParam(&accugrad,SmbAccugradEnum);
@@ -708,16 +700,10 @@
 	switch(this->ObjectEnum()){
 	case TriaEnum:
-		this->inputs->AddInput(new TriaInput(SmbMassBalanceSubstepEnum,&smb[0],P1Enum));
-		this->inputs->AddInput(new TriaInput(SmbRunoffSubstepEnum,&runoff[0],P1Enum));
+		this->AddInput2(SmbMassBalanceSubstepEnum,&smb[0],P1Enum);
+		this->AddInput2(SmbRunoffSubstepEnum,&runoff[0],P1Enum);
 		break;
 	case PentaEnum:
-		this->inputs->AddInput(new PentaInput(SmbMassBalanceSubstepEnum,&smb[0],P1Enum));
-		this->inputs->AddInput(new PentaInput(SmbRunoffSubstepEnum,&runoff[0],P1Enum));
-		this->InputExtrude(SmbMassBalanceSubstepEnum,-1);
-		this->InputExtrude(SmbRunoffSubstepEnum,-1);
-		break;
-	case TetraEnum:
-		this->inputs->AddInput(new TetraInput(SmbMassBalanceSubstepEnum,&smb[0],P1Enum));
-		this->inputs->AddInput(new TetraInput(SmbRunoffSubstepEnum,&runoff[0],P1Enum));
+		this->AddInput2(SmbMassBalanceSubstepEnum,&smb[0],P1Enum);
+		this->AddInput2(SmbRunoffSubstepEnum,&runoff[0],P1Enum);
 		this->InputExtrude(SmbMassBalanceSubstepEnum,-1);
 		this->InputExtrude(SmbRunoffSubstepEnum,-1);
@@ -744,9 +730,9 @@
 	/*Get inputs and parameters*/
 	this->FindParam(&dim,DomainDimensionEnum);
-	Input* vx_input = this->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input = this->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input = NULL;
+	Input2* vx_input = this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = this->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input = NULL;
 	if(dim==3){
-		vz_input = this->GetInput(VzEnum); _assert_(vz_input);
+		vz_input = this->GetInput2(VzEnum); _assert_(vz_input);
 	}
 	this->GetVerticesCoordinates(&xyz_list);
@@ -775,5 +761,5 @@
 	return divergence;
 }/*}}}*/
-void       Element::dViscositydBFS(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){/*{{{*/
+void       Element::dViscositydBFS(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -812,5 +798,5 @@
 }
 /*}}}*/
-void       Element::dViscositydBHO(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void       Element::dViscositydBHO(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -849,5 +835,5 @@
 }
 /*}}}*/
-void       Element::dViscositydBSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void       Element::dViscositydBSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -886,5 +872,5 @@
 }
 /*}}}*/
-void       Element::dViscositydDSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void       Element::dViscositydDSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -917,4 +903,5 @@
 	_printf_("   id : "<<this->id <<"\n");
 	_printf_("   sid: "<<this->sid<<"\n");
+	_printf_("   lid: "<<this->lid<<"\n");
 	if(vertices){
 		const int NUM_VERTICES = this->GetNumberOfVertices();
@@ -940,6 +927,6 @@
 
 	_printf_("   inputs\n");
-	if (inputs) inputs->Echo();
-	else _printf_("inputs=NULL\n");
+	if (inputs2) inputs2->Echo();
+	else _printf_("inputs2=NULL\n");
 }
 /*}}}*/
@@ -1112,51 +1099,23 @@
 }
 /*}}}*/
-Input*     Element::GetInput(int inputenum){/*{{{*/
-	return inputs->GetInput(inputenum);
-}/*}}}*/
 void       Element::GetInputListOnNodes(IssmDouble* pvalue,int enumtype,IssmDouble defaultvalue){/*{{{*/
+	Input2 *input    = this->GetInput2(enumtype);
+	this->GetInputListOnNodes(pvalue,input,defaultvalue);
+}
+/*}}}*/
+void       Element::GetInputListOnNodes(IssmDouble* pvalue,int enumtype){/*{{{*/
+
+	Input2 *input    = this->GetInput2(enumtype);
+	if(!input) _error_("Input " << EnumToStringx(enumtype) << " not found in element");
+	this->GetInputListOnNodes(pvalue,input,0.);
+
+}
+/*}}}*/
+void       Element::GetInputListOnNodesVelocity(IssmDouble* pvalue,int enumtype){/*{{{*/
 
 	_assert_(pvalue);
 
-	Input *input    = this->GetInput(enumtype);
-	int    numnodes = this->GetNumberOfNodes();
-
-	/* Start looping on the number of vertices: */
-	if(input){
-		Gauss* gauss=this->NewGauss();
-		for(int iv=0;iv<numnodes;iv++){
-			gauss->GaussNode(this->FiniteElement(),iv);
-			input->GetInputValue(&pvalue[iv],gauss);
-		}
-		delete gauss;
-	}
-	else{
-		for(int iv=0;iv<numnodes;iv++) pvalue[iv]=defaultvalue;
-	}
-}
-/*}}}*/
-void       Element::GetInputListOnNodes(IssmDouble* pvalue,int enumtype){/*{{{*/
-
-	_assert_(pvalue);
-
-	int    numnodes = this->GetNumberOfNodes();
-	Input *input    = this->GetInput(enumtype);
-	if(!input) _error_("Input " << EnumToStringx(enumtype) << " not found in element");
-
-	/* Start looping on the number of vertices: */
-	Gauss* gauss=this->NewGauss();
-	for(int iv=0;iv<numnodes;iv++){
-		gauss->GaussNode(this->FiniteElement(),iv);
-		input->GetInputValue(&pvalue[iv],gauss);
-	}
-	delete gauss;
-}
-/*}}}*/
-void       Element::GetInputListOnNodesVelocity(IssmDouble* pvalue,int enumtype){/*{{{*/
-
-	_assert_(pvalue);
-
-	int    numnodes = this->NumberofNodesVelocity();
-	Input *input    = this->GetInput(enumtype);
+	int     numnodes = this->NumberofNodesVelocity();
+	Input2 *input    = this->GetInput2(enumtype);
 	if(!input) _error_("Input " << EnumToStringx(enumtype) << " not found in element");
 
@@ -1173,69 +1132,20 @@
 
 	/*Recover input*/
-	Input* input=this->GetInput(enumtype);
+	Input2* input2=this->GetInput2(enumtype);
+	if(!input2) _error_("input "<<EnumToStringx(enumtype)<<" not found in element");
+	this->GetInputListOnVertices(pvalue,input2,0.);
+}
+/*}}}*/
+void       Element::GetInputListOnVerticesAtTime(IssmDouble* pvalue, int enumtype, IssmDouble time){/*{{{*/
+
+	/*Recover input*/
+	Input2* input=this->GetInput2(enumtype,time);
 	if (!input) _error_("Input " << EnumToStringx(enumtype) << " not found in element");
-	/*Fetch number vertices for this element*/
-	const int NUM_VERTICES = this->GetNumberOfVertices();
-
-	/*Checks in debugging mode*/
-	_assert_(pvalue);
-
-	/* Start looping on the number of vertices: */
-	Gauss*gauss=this->NewGauss();
-	for(int iv=0;iv<NUM_VERTICES;iv++){
-		gauss->GaussVertex(iv);
-		input->GetInputValue(&pvalue[iv],gauss);
-	}
-
-	/*clean-up*/
-	delete gauss;
-}
-/*}}}*/
-void       Element::GetInputListOnVerticesAtTime(IssmDouble* pvalue, int enumtype, IssmDouble time){/*{{{*/
-
-	/*Recover input*/
-	Input* input=this->GetInput(enumtype);
-	if (!input) _error_("Input " << EnumToStringx(enumtype) << " not found in element");
-
-	/*Fetch number vertices for this element*/
-	const int NUM_VERTICES = this->GetNumberOfVertices();
-
-	/*Checks in debugging mode*/
-	_assert_(pvalue);
-
-	/* Start looping on the number of vertices: */
-	Gauss*gauss=this->NewGauss();
-	for(int iv=0;iv<NUM_VERTICES;iv++){
-		gauss->GaussVertex(iv);
-		input->GetInputValue(&pvalue[iv],gauss,time);
-	}
-
-	/*clean-up*/
-	delete gauss;
+	this->GetInputListOnVertices(pvalue,input,0.);
 }
 /*}}}*/
 void       Element::GetInputListOnVertices(IssmDouble* pvalue,int enumtype,IssmDouble defaultvalue){/*{{{*/
-
-	/*Recover input*/
-	Input* input=this->GetInput(enumtype);
-
-	/*Checks in debugging mode*/
-	_assert_(pvalue);
-
-	/*Fetch number vertices for this element*/
-	const int NUM_VERTICES = this->GetNumberOfVertices();
-
-	/* Start looping on the number of vertices: */
-	if (input){
-		Gauss* gauss=this->NewGauss();
-		for (int iv=0;iv<NUM_VERTICES;iv++){
-			gauss->GaussVertex(iv);
-			input->GetInputValue(&pvalue[iv],gauss);
-		}
-		delete gauss;
-	}
-	else{
-		for(int iv=0;iv<NUM_VERTICES;iv++) pvalue[iv]=defaultvalue;
-	}
+	Input2* input=this->GetInput2(enumtype);
+	this->GetInputListOnVertices(pvalue,input,defaultvalue);
 }
 /*}}}*/
@@ -1267,52 +1177,31 @@
 void       Element::GetInputValue(bool* pvalue,int inputenum){/*{{{*/
 
-	Input* input=inputs->GetInput(inputenum);
-	if(!input) _error_("Input " << EnumToStringx(inputenum) << " not found in element");
-	input->GetInputValue(pvalue);
+	this->inputs2->GetInputValue(pvalue,inputenum,this->lid);
 
 }/*}}}*/
 void       Element::GetInputValue(int* pvalue,int inputenum){/*{{{*/
-
-	Input* input=inputs->GetInput(inputenum);
-	if(!input) _error_("Input " << EnumToStringx(inputenum) << " not found in element");
-	input->GetInputValue(pvalue);
+	this->inputs2->GetInputValue(pvalue,inputenum,this->lid);
+}/*}}}*/
+void       Element::GetInput2Value(bool* pvalue,int inputenum){/*{{{*/
+
+	this->inputs2->GetInputValue(pvalue,inputenum,this->lid);
+
+}/*}}}*/
+void       Element::GetInput2Value(int* pvalue,int inputenum){/*{{{*/
+
+	this->inputs2->GetInputValue(pvalue,inputenum,this->lid);
 
 }/*}}}*/
 void       Element::GetInputValue(IssmDouble* pvalue,int inputenum){/*{{{*/
 
-	Input* input=inputs->GetInput(inputenum);
-	if(!input) _error_("Input " << EnumToStringx(inputenum) << " not found in element");
-	input->GetInputValue(pvalue);
+	/*FIXME: function to delete!*/
+	_error_("Gauss point should be provided (Trying to fetch \""<<EnumToStringx(inputenum)<<"\", this function should be deleted)");
 
 }/*}}}*/
 void       Element::GetInputValue(IssmDouble* pvalue,Gauss* gauss,int inputenum){/*{{{*/
 
-	Input* input=inputs->GetInput(inputenum);
+	Input2* input=this->GetInput2(inputenum);
 	if(!input) _error_("Input " << EnumToStringx(inputenum) << " not found in element");
 	input->GetInputValue(pvalue,gauss);
-
-}/*}}}*/
-void       Element::GetInputsInterpolations(Vector<IssmDouble>* interpolations){/*{{{*/
-
-	int interpolation;
-
-	/*Go through all inputs and assign interpolation in vector*/
-	_assert_(this->inputs);
-	for(int i=0;i<this->inputs->Size();i++){
-		Input* input=xDynamicCast<Input*>(this->inputs->GetObjectByOffset(i));
-		switch(input->ObjectEnum()){
-			case BoolInputEnum:
-			case DoubleInputEnum:
-			case IntInputEnum:
-				interpolations->SetValue(input->InstanceEnum(),reCast<IssmDouble>(input->ObjectEnum()),INS_VAL);
-				break;
-			case TriaInputEnum:
-				interpolation = input->GetResultInterpolation();
-				interpolations->SetValue(input->InstanceEnum(),interpolation,INS_VAL);
-				break;
-			default:
-				_error_("Input "<<EnumToStringx(input->ObjectEnum())<<" not supported yet");
-		}
-	}
 
 }/*}}}*/
@@ -1404,5 +1293,5 @@
 
 	/*Get inputs*/
-	Input* enum_input=inputs->GetInput(enum_type); _assert_(enum_input);
+	Input2* enum_input=this->GetInput2(enum_type); _assert_(enum_input);
 
 	/*Ok, we have the values, fill in the array: */
@@ -1450,9 +1339,9 @@
 	IssmDouble  value;
 	IssmDouble* values      = NULL;
-	Input*      input       = NULL;
+	Input2*     input       = NULL;
 
 	switch(type){
 		case ElementSIdEnum:
-			input=inputs->GetInput(input_enum); _assert_(input);
+			input=this->GetInput2(input_enum); _assert_(input);
 			input->GetInputAverage(&value);
 			vector->SetValue(this->sid,value,INS_VAL);
@@ -1672,8 +1561,10 @@
 /*}}}*/
 bool       Element::HasNodeOnBase(){/*{{{*/
-	return (this->inputs->Max(MeshVertexonbaseEnum)>0.);
+	Input2* input=this->GetInput2(MeshVertexonbaseEnum); _assert_(input);
+	return (input->GetInputMax()>0.);
 }/*}}}*/
 bool       Element::HasNodeOnSurface(){/*{{{*/
-	return (this->inputs->Max(MeshVertexonsurfaceEnum)>0.);
+	Input2* input=this->GetInput2(MeshVertexonsurfaceEnum); _assert_(input);
+	return (input->GetInputMax()>0.);
 }/*}}}*/
 IssmDouble Element::IceMass(bool scaled){/*{{{*/
@@ -1734,9 +1625,5 @@
 }
 /*}}}*/
-void       Element::InputChangeName(int original_enum,int new_enum){/*{{{*/
-	this->inputs->ChangeEnum(original_enum,new_enum);
-}
-/*}}}*/
-void       Element::InputCreate(IssmDouble* vector,IoModel* iomodel,int M,int N,int vector_type,int vector_enum,int code){/*{{{*/
+void       Element::InputCreate(IssmDouble* vector,Inputs2* inputs2,IoModel* iomodel,int M,int N,int vector_type,int vector_enum,int code){/*{{{*/
 
 	/*Intermediaries*/
@@ -1748,11 +1635,13 @@
 		const int NUM_VERTICES = this->GetNumberOfVertices();
 
-		int        *vertexids   = xNew<int>(NUM_VERTICES);
-		IssmDouble *values      = xNew<IssmDouble>(NUM_VERTICES);
+		int        *vertexids  = xNew<int>(NUM_VERTICES);
+		int        *vertexlids = xNew<int>(NUM_VERTICES);
+		IssmDouble *values     = xNew<IssmDouble>(NUM_VERTICES);
 
 		/*Recover vertices ids needed to initialize inputs*/
 		_assert_(iomodel->elements);
 		for(i=0;i<NUM_VERTICES;i++){
-			vertexids[i]=reCast<int>(iomodel->elements[NUM_VERTICES*this->Sid()+i]); //ids for vertices are in the elements array from Matlab
+			vertexids[i] =reCast<int>(iomodel->elements[NUM_VERTICES*this->Sid()+i]); //ids for vertices are in the elements array from Matlab
+			vertexlids[i]=iomodel->my_vertices_lids[vertexids[i]-1];
 		}
 
@@ -1760,9 +1649,9 @@
 		if(M==1){
 			values[0]=vector[0];
-			this->AddInput(vector_enum,values,P0Enum);
+			this->SetElementInput(inputs2,vector_enum,vector[0]);
 		}
 		else if(M==iomodel->numberofvertices){
 			for(i=0;i<NUM_VERTICES;i++) values[i]=vector[vertexids[i]-1];
-			this->AddInput(vector_enum,values,P1Enum);
+			this->SetElementInput(inputs2,NUM_VERTICES,vertexlids,values,vector_enum);
 		}
 		else if(M==iomodel->numberofvertices+1){
@@ -1770,15 +1659,14 @@
 			IssmDouble* times = xNew<IssmDouble>(N);
 			for(t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
-			TransientInput* transientinput=new TransientInput(vector_enum,times,N);
+			inputs2->SetTransientInput(vector_enum,times,N);
+			TransientInput2* transientinput = inputs2->GetTransientInput(vector_enum);
 			for(t=0;t<N;t++){
 				for(i=0;i<NUM_VERTICES;i++) values[i]=vector[N*(vertexids[i]-1)+t];
 				switch(this->ObjectEnum()){
-					case TriaEnum:  transientinput->AddTimeInput(new TriaInput( vector_enum,values,P1Enum)); break;
-					case PentaEnum: transientinput->AddTimeInput(new PentaInput(vector_enum,values,P1Enum)); break;
-					case TetraEnum: transientinput->AddTimeInput(new TetraInput(vector_enum,values,P1Enum)); break;
+					case TriaEnum:  transientinput->AddTriaTimeInput( t,NUM_VERTICES,vertexlids,values,P1Enum); break;
+					case PentaEnum: transientinput->AddPentaTimeInput(t,NUM_VERTICES,vertexlids,values,P1Enum); break;
 					default: _error_("Not implemented yet");
 				}
 			}
-			this->inputs->AddInput(transientinput);
 			xDelete<IssmDouble>(times);
 		}
@@ -1790,9 +1678,19 @@
 			for(int j=0;j<N;j++) values[j]=vector[this->Sid()*N+j];
 
-			if     (N==this->GetNumberOfNodes(P1Enum)   ) this->AddInput(vector_enum,values,P1Enum);
-			else if(N==this->GetNumberOfNodes(P0Enum)   ) this->AddInput(vector_enum,values,P0Enum);
-			else if(N==this->GetNumberOfNodes(P1xP2Enum)) this->AddInput(vector_enum,values,P1xP2Enum);
-			else if(N==this->GetNumberOfNodes(P1xP3Enum)) this->AddInput(vector_enum,values,P1xP3Enum);
-			else _error_("Patch interpolation not supported yet");
+			if (N==this->GetNumberOfNodes(P1Enum)){
+				this->SetElementInput(inputs2,NUM_VERTICES,vertexlids,values,vector_enum);
+			}
+			else if(N==this->GetNumberOfNodes(P0Enum)){
+				this->SetElementInput(inputs2,vector_enum,values[0]);
+			}
+			else if(N==this->GetNumberOfNodes(P1xP2Enum)){ _assert_(this->ObjectEnum()==PentaEnum);
+				inputs2->SetPentaInput(vector_enum,P1xP2Enum,this->lid,N,values);
+			}
+			else if(N==this->GetNumberOfNodes(P1xP3Enum)){ _assert_(this->ObjectEnum()==PentaEnum);
+				inputs2->SetPentaInput(vector_enum,P1xP3Enum,this->lid,N,values);
+			}
+			else{
+				_error_("Patch interpolation not supported yet");
+			}
 
 		}
@@ -1803,4 +1701,5 @@
 		xDelete<IssmDouble>(values);
 		xDelete<int>(vertexids);
+		xDelete<int>(vertexlids);
 	}
 	else if(vector_type==2){ //element vector
@@ -1811,11 +1710,11 @@
 		if(M==iomodel->numberofelements){
 			if (code==5){ //boolean
-				this->inputs->AddInput(new BoolInput(vector_enum,reCast<bool>(vector[this->Sid()])));
+				this->SetBoolInput(inputs2,vector_enum,reCast<bool>(vector[this->Sid()]));
 			}
 			else if (code==6){ //integer
-				this->inputs->AddInput(new IntInput(vector_enum,reCast<int>(vector[this->Sid()])));
+				this->SetIntInput(inputs2,vector_enum,reCast<int>(vector[this->Sid()]));
 			}
 			else if (code==7){ //IssmDouble
-				this->inputs->AddInput(new DoubleInput(vector_enum,vector[this->Sid()]));
+				this->SetElementInput(inputs2,vector_enum,vector[this->Sid()]);
 			}
 			else _error_("could not recognize nature of vector from code " << code);
@@ -1825,29 +1724,25 @@
 			IssmDouble* times = xNew<IssmDouble>(N);
 			for(t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
-			TransientInput* transientinput=new TransientInput(vector_enum,times,N);
-			TriaInput* bof=NULL;
+			inputs2->SetTransientInput(vector_enum,times,N);
+			TransientInput2* transientinput = inputs2->GetTransientInput(vector_enum);
 			for(t=0;t<N;t++){
 				value=vector[N*this->Sid()+t];
 				switch(this->ObjectEnum()){
-					case TriaEnum:  transientinput->AddTimeInput(new TriaInput( vector_enum,&value,P0Enum)); break;
-					case PentaEnum: transientinput->AddTimeInput(new PentaInput(vector_enum,&value,P0Enum)); break;
-					case TetraEnum: transientinput->AddTimeInput(new TetraInput(vector_enum,&value,P0Enum)); break;
+					case TriaEnum:  transientinput->AddTriaTimeInput( t,1,&(this->lid),&value,P0Enum); break;
+					case PentaEnum: transientinput->AddPentaTimeInput(t,1,&(this->lid),&value,P0Enum); break;
 					default: _error_("Not implemented yet");
 				}
 			}
-			this->inputs->AddInput(transientinput);
 			xDelete<IssmDouble>(times);
 		}
 		else _error_("element vector is either numberofelements or numberofelements+1 long. Field provided (" << EnumToStringx(vector_enum) << ") is " << M << " long");
 	}
-	else if(vector_type==3){ //element vector
+	else if(vector_type==3){ //Double array matrix
 
 		/*For right now we are static */
 		if(M==iomodel->numberofelements){
-			/*create transient input: */
-			IssmDouble* layers = xNewZeroInit<IssmDouble>(N);;
+			IssmDouble* layers = xNewZeroInit<IssmDouble>(N);
 			for(t=0;t<N;t++) layers[t] = vector[N*this->Sid()+t];
-			DoubleArrayInput* arrayinput=new DoubleArrayInput(vector_enum,layers,N);
-			this->inputs->AddInput(arrayinput);
+			inputs2->SetArrayInput(vector_enum,this->lid,layers,N);
 			xDelete<IssmDouble>(layers);
 		}
@@ -1857,13 +1752,13 @@
 }
 /*}}}*/
-void       Element::ControlInputCreate(IssmDouble* vector,IssmDouble* min_vector,IssmDouble* max_vector,IoModel* iomodel,int M,int N,int input_enum,int id){/*{{{*/
+void       Element::ControlInputCreate(IssmDouble* vector,IssmDouble* min_vector,IssmDouble* max_vector,Inputs2* inputs2,IoModel* iomodel,int M,int N,IssmDouble scale,int input_enum,int id){/*{{{*/
 
 	/*Intermediaries*/
-	const int NUM_VERTICES = this->GetNumberOfVertices();
-
-	int        *vertexids   = xNew<int>(NUM_VERTICES);
-	IssmDouble *values      = xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble *values_min  = xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble *values_max  = xNew<IssmDouble>(NUM_VERTICES);
+	const int numvertices = this->GetNumberOfVertices();
+
+	int        *vertexids   = xNew<int>(numvertices);
+	IssmDouble *values      = xNew<IssmDouble>(numvertices);
+	IssmDouble *values_min  = xNew<IssmDouble>(numvertices);
+	IssmDouble *values_max  = xNew<IssmDouble>(numvertices);
 
 	/*Some sanity checks*/
@@ -1872,60 +1767,57 @@
 	_assert_(max_vector);
 
-	/*For now we only support nodal vectors*/
-	//if(M!=iomodel->numberofvertices) _error_("not supported");
-	//if(N!=1) _error_("not supported");
-
 	/*Recover vertices ids needed to initialize inputs*/
 	_assert_(iomodel->elements);
-	for(int i=0;i<NUM_VERTICES;i++){
-		vertexids[i]=reCast<int>(iomodel->elements[NUM_VERTICES*this->Sid()+i]); //ids for vertices are in the elements array from Matlab
+	for(int i=0;i<numvertices;i++){
+		vertexids[i]=reCast<int>(iomodel->elements[numvertices*this->Sid()+i]); //ids for vertices are in the elements array from Matlab
 	}
 
 	/*Are we in transient or static? */
-	if(M==iomodel->numberofvertices){
-		for(int i=0;i<NUM_VERTICES;i++){
-			values[i]=vector[vertexids[i]-1];
-			values_min[i] = min_vector[vertexids[i]-1];
-			values_max[i] = max_vector[vertexids[i]-1];
-		}
-		this->AddControlInput(input_enum,values,values_min,values_max,P1Enum,id);
-	}
-
-	else if(M==iomodel->numberofvertices+1){
-		/*create transient input: */
-		IssmDouble* times = xNew<IssmDouble>(N);
-		for(int t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
-		/*Create the three transient inputs for the control input*/
-		TransientInput* values_input=new TransientInput(input_enum,times,N);
-		TransientInput* mins_input = new TransientInput(ControlInputMinsEnum,times,N);
-		TransientInput* maxs_input = new TransientInput(ControlInputMaxsEnum,times,N);
-		TransientInput* grad_input = new TransientInput(ControlInputGradEnum);
-		for(int t=0;t<N;t++){
-			for(int i=0;i<NUM_VERTICES;i++){
-				values[i]=vector[N*(vertexids[i]-1)+t];
-				values_min[i] = min_vector[N*(vertexids[i]-1)+t];
-				values_max[i] = max_vector[N*(vertexids[i]-1)+t];
-			}
-			switch(this->ObjectEnum()){
-				case TriaEnum:
-					values_input->AddTimeInput(new TriaInput(input_enum,values,P1Enum));
-					mins_input->AddTimeInput(new TriaInput(ControlInputMinsEnum,values_min,P1Enum));
-					maxs_input->AddTimeInput(new TriaInput(ControlInputMaxsEnum,values_max,P1Enum));
-					break;
-				case PentaEnum:
-					values_input->AddTimeInput(new PentaInput(input_enum,values,P1Enum));
-					mins_input->AddTimeInput(new PentaInput(ControlInputMinsEnum,values_min,P1Enum));
-					maxs_input->AddTimeInput(new PentaInput(ControlInputMaxsEnum,values_max,P1Enum));
-					break;
-				case TetraEnum:
-					values_input->AddTimeInput(new TetraInput(input_enum,values,P1Enum));
-					mins_input->AddTimeInput(new TetraInput(ControlInputMinsEnum,values_min,P1Enum));
-					maxs_input->AddTimeInput(new TetraInput(ControlInputMaxsEnum,values_max,P1Enum));
-					break;
-				default: _error_("Not implemented yet");
-			}
-		}
-		this->inputs->AddInput(new ControlInput(input_enum,TransientInputEnum,values_input,mins_input,maxs_input,grad_input,P1Enum,id));
-		xDelete<IssmDouble>(times);
+	if(M==iomodel->numberofvertices && N==1){
+		for(int i=0;i<numvertices;i++){
+			values[i]     = vector[vertexids[i]-1];
+			values_min[i] = scale*min_vector[vertexids[i]-1];
+			values_max[i] = scale*max_vector[vertexids[i]-1];
+		}
+		this->AddControlInput(input_enum,inputs2,iomodel,values,values_min,values_max,P1Enum,id);
+	}
+
+	else if(M==iomodel->numberofvertices+1 && N>1){
+		_error_("not supported yet");
+		///*create transient input: */
+		//IssmDouble* times = xNew<IssmDouble>(N);
+		//for(int t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
+		///*Create the three transient inputs for the control input*/
+		//TransientInput* values_input=new TransientInput(input_enum,times,N);
+		//TransientInput* mins_input = new TransientInput(ControlInputMinsEnum,times,N);
+		//TransientInput* maxs_input = new TransientInput(ControlInputMaxsEnum,times,N);
+		//TransientInput* grad_input = new TransientInput(ControlInputGradEnum);
+		//for(int t=0;t<N;t++){
+		//	for(int i=0;i<numvertices;i++){
+		//		values[i]=vector[N*(vertexids[i]-1)+t];
+		//		values_min[i] = min_vector[N*(vertexids[i]-1)+t];
+		//		values_max[i] = max_vector[N*(vertexids[i]-1)+t];
+		//	}
+		//	switch(this->ObjectEnum()){
+		//		case TriaEnum:
+		//			values_input->AddTimeInput(new TriaInput(input_enum,values,P1Enum));
+		//			mins_input->AddTimeInput(new TriaInput(ControlInputMinsEnum,values_min,P1Enum));
+		//			maxs_input->AddTimeInput(new TriaInput(ControlInputMaxsEnum,values_max,P1Enum));
+		//			break;
+		//		case PentaEnum:
+		//			values_input->AddTimeInput(new PentaInput(input_enum,values,P1Enum));
+		//			mins_input->AddTimeInput(new PentaInput(ControlInputMinsEnum,values_min,P1Enum));
+		//			maxs_input->AddTimeInput(new PentaInput(ControlInputMaxsEnum,values_max,P1Enum));
+		//			break;
+		//		case TetraEnum:
+		//			values_input->AddTimeInput(new TetraInput(input_enum,values,P1Enum));
+		//			mins_input->AddTimeInput(new TetraInput(ControlInputMinsEnum,values_min,P1Enum));
+		//			maxs_input->AddTimeInput(new TetraInput(ControlInputMaxsEnum,values_max,P1Enum));
+		//			break;
+		//		default: _error_("Not implemented yet");
+		//	}
+		//}
+		//this->inputs->AddInput(new ControlInput(input_enum,TransientInputEnum,values_input,mins_input,maxs_input,grad_input,P1Enum,id));
+		//xDelete<IssmDouble>(times);
 	}
 	else _error_("not currently supported type of M and N attempted");
@@ -1933,8 +1825,10 @@
 	/*clean up*/
 	xDelete<IssmDouble>(values);
+	xDelete<IssmDouble>(values_min);
+	xDelete<IssmDouble>(values_max);
 	xDelete<int>(vertexids);
 }
 /*}}}*/
-void       Element::DatasetInputAdd(int enum_type,IssmDouble* vector,IoModel* iomodel,int M,int N,int vector_type,int input_enum,int code,int input_id){/*{{{*/
+void       Element::DatasetInputAdd(int enum_type,IssmDouble* vector,Inputs2* inputs2,IoModel* iomodel,int M,int N,int vector_type,int input_enum,int code,int input_id){/*{{{*/
 	/*enum_type: the name of the DatasetInput (eg Outputdefinition1)
 	 * vector: information being stored (eg observations)
@@ -1945,18 +1839,5 @@
 
 	/*Intermediaries*/
-	int					i,t;
-	DatasetInput*		datasetinput = NULL;
-
-	/*Get input if it already exists*/
-	Input*  tempinput = GetInput(enum_type);
-	if(tempinput){
-		/*Cast it to a Datasetinput*/
-		if(tempinput->ObjectEnum()!=DatasetInputEnum) _error_("don't know what to do");
-		datasetinput = (DatasetInput*)tempinput;
-	}
-	else{
-		datasetinput=new DatasetInput(enum_type);
-		this->inputs->AddInput(datasetinput);
-	}
+	int i,t;
 
 	/*Branch on type of vector: nodal or elementary: */
@@ -1965,11 +1846,13 @@
 		const int NUM_VERTICES = this->GetNumberOfVertices();
 
-		int        *vertexids   = xNew<int>(NUM_VERTICES);
-		IssmDouble *values      = xNew<IssmDouble>(NUM_VERTICES);
+		int        *vertexids  = xNew<int>(NUM_VERTICES);
+		int        *vertexlids = xNew<int>(NUM_VERTICES);
+		IssmDouble *values     = xNew<IssmDouble>(NUM_VERTICES);
 
 		/*Recover vertices ids needed to initialize inputs*/
 		_assert_(iomodel->elements);
 		for(i=0;i<NUM_VERTICES;i++){
-			vertexids[i]=reCast<int>(iomodel->elements[NUM_VERTICES*this->Sid()+i]); //ids for vertices are in the elements array from Matlab
+			vertexids[i] =reCast<int>(iomodel->elements[NUM_VERTICES*this->Sid()+i]); //ids for vertices are in the elements array from Matlab
+			vertexlids[i]=iomodel->my_vertices_lids[vertexids[i]-1];
 		}
 
@@ -1977,86 +1860,40 @@
 		if(M==1){
 			values[0]=vector[0];
-			switch(this->ObjectEnum()){
-				case TriaEnum:  datasetinput->AddInput(new TriaInput(input_enum,values,P0Enum),input_id); break;
-				case PentaEnum: datasetinput->AddInput(new PentaInput(input_enum,values,P0Enum),input_id); break;
-				case TetraEnum: datasetinput->AddInput(new TetraInput(input_enum,values,P0Enum),input_id); break;
-				default: _error_("Not implemented yet");
-			}
+			//this->AddInput2(vector_enum,values,P0Enum);
+			_error_("not implemented yet");
 		}
 		else if(M==iomodel->numberofvertices){
 			for(i=0;i<NUM_VERTICES;i++) values[i]=vector[vertexids[i]-1];
 			switch(this->ObjectEnum()){
-				case TriaEnum:  datasetinput->AddInput(new TriaInput(input_enum,values,P1Enum),input_id); break;
-				case PentaEnum: datasetinput->AddInput(new PentaInput(input_enum,values,P1Enum),input_id); break;
-				case TetraEnum: datasetinput->AddInput(new TetraInput(input_enum,values,P1Enum),input_id); break;
-				default: _error_("Not implemented yet");
-			}  }
+				case TriaEnum:  inputs2->SetTriaDatasetInput(enum_type,input_id,P1Enum,NUM_VERTICES,vertexlids,values); break;
+				case PentaEnum: inputs2->SetPentaDatasetInput(enum_type,input_id,P1Enum,NUM_VERTICES,vertexlids,values); break;
+				default: _error_("Not implemented yet for "<<this->ObjectEnum());
+			}
+		}
 		else if(M==iomodel->numberofvertices+1){
 			/*create transient input: */
 			IssmDouble* times = xNew<IssmDouble>(N);
 			for(t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
-			TransientInput* transientinput=new TransientInput(input_enum,times,N);
+			TransientInput2* transientinput = inputs2->SetDatasetTransientInput(enum_type,input_id,times,N);
 			for(t=0;t<N;t++){
 				for(i=0;i<NUM_VERTICES;i++) values[i]=vector[N*(vertexids[i]-1)+t];
 				switch(this->ObjectEnum()){
-					case TriaEnum:  transientinput->AddTimeInput(new TriaInput( input_enum,values,P1Enum)); break;
-					case PentaEnum: transientinput->AddTimeInput(new PentaInput(input_enum,values,P1Enum)); break;
-					case TetraEnum: transientinput->AddTimeInput(new TetraInput(input_enum,values,P1Enum)); break;
+					case TriaEnum:  transientinput->AddTriaTimeInput( t,NUM_VERTICES,vertexlids,values,P1Enum); break;
+					case PentaEnum: transientinput->AddPentaTimeInput(t,NUM_VERTICES,vertexlids,values,P1Enum); break;
 					default: _error_("Not implemented yet");
 				}
 			}
-			datasetinput->AddInput(transientinput,input_id);
 			xDelete<IssmDouble>(times);
 		}
-		else if(M==iomodel->numberofelements){
-
-			/*This is a Patch!*/
-			xDelete<IssmDouble>(values);
-			values = xNew<IssmDouble>(N);
-			for(int j=0;j<N;j++) values[j]=vector[this->Sid()*N+j];
-
-			if     (N==this->GetNumberOfNodes(P1Enum)   ){
-				switch(this->ObjectEnum()){
-					case TriaEnum:  datasetinput->AddInput(new TriaInput(input_enum,values,P1Enum),input_id); break;
-					case PentaEnum: datasetinput->AddInput(new PentaInput(input_enum,values,P1Enum),input_id); break;
-					case TetraEnum: datasetinput->AddInput(new TetraInput(input_enum,values,P1Enum),input_id); break;
-					default: _error_("Not implemented yet");
-				}
-			}
-			else if(N==this->GetNumberOfNodes(P0Enum)   ){
-				switch(this->ObjectEnum()){
-					case TriaEnum:  datasetinput->AddInput(new TriaInput(input_enum,values,P0Enum),input_id); break;
-					case PentaEnum: datasetinput->AddInput(new PentaInput(input_enum,values,P0Enum),input_id); break;
-					case TetraEnum: datasetinput->AddInput(new TetraInput(input_enum,values,P0Enum),input_id); break;
-					default: _error_("Not implemented yet");
-				}
-			}
-			else if(N==this->GetNumberOfNodes(P1xP2Enum)){
-				switch(this->ObjectEnum()){
-					case TriaEnum:  datasetinput->AddInput(new TriaInput(input_enum,values,P1xP2Enum),input_id); break;
-					case PentaEnum: datasetinput->AddInput(new PentaInput(input_enum,values,P1xP2Enum),input_id); break;
-					case TetraEnum: datasetinput->AddInput(new TetraInput(input_enum,values,P1xP2Enum),input_id); break;
-					default: _error_("Not implemented yet");
-				}
-			}
-			else if(N==this->GetNumberOfNodes(P1xP3Enum)) {
-				switch(this->ObjectEnum()){
-					case TriaEnum:  datasetinput->AddInput(new TriaInput(input_enum,values,P1xP3Enum),input_id); break;
-					case PentaEnum: datasetinput->AddInput(new PentaInput(input_enum,values,P1xP3Enum),input_id); break;
-					case TetraEnum: datasetinput->AddInput(new TetraInput(input_enum,values,P1xP3Enum),input_id); break;
-					default: _error_("Not implemented yet");
-				}
-			}
-			else _error_("Patch interpolation not supported yet");
-
-		}
 		else{
-			_error_("nodal vector is either numberofvertices or numberofvertices+1 long. Field provided (" << EnumToStringx(input_enum) << ") is " << M << " long");
+			_error_("not implemented yet (M="<<M<<")");
 		}
 
 		xDelete<IssmDouble>(values);
 		xDelete<int>(vertexids);
+		xDelete<int>(vertexlids);
 	}
 	else if(vector_type==2){ //element vector
+		_error_("not supported");
 
 		IssmDouble value;
@@ -2065,57 +1902,53 @@
 		if(M==iomodel->numberofelements){
 			if (code==5){ //boolean
-				datasetinput->AddInput(new BoolInput(input_enum,reCast<bool>(vector[this->Sid()])),input_id);
+				_error_("not implemented");
+				//datasetinput->AddInput(new BoolInput(input_enum,reCast<bool>(vector[this->Sid()])),input_id);
 			}
 			else if (code==6){ //integer
-				datasetinput->AddInput(new IntInput(input_enum,reCast<int>(vector[this->Sid()])),input_id);
+				_error_("not implemented");
+				//datasetinput->AddInput(new IntInput(input_enum,reCast<int>(vector[this->Sid()])),input_id);
 			}
 			else if (code==7){ //IssmDouble
-				datasetinput->AddInput(new DoubleInput(input_enum,vector[this->Sid()]),input_id);
+				_error_("not implemented");
+				//datasetinput->AddInput(new DoubleInput(input_enum,vector[this->Sid()]),input_id);
 			}
 			else _error_("could not recognize nature of vector from code " << code);
 		}
 		else if(M==iomodel->numberofelements+1){
-			/*create transient input: */
-			IssmDouble* times = xNew<IssmDouble>(N);
-			for(t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
-			TransientInput* transientinput=new TransientInput(input_enum,times,N);
-			TriaInput* bof=NULL;
-			for(t=0;t<N;t++){
-				value=vector[N*this->Sid()+t];
-				switch(this->ObjectEnum()){
-					case TriaEnum:  transientinput->AddTimeInput(new TriaInput( input_enum,&value,P0Enum)); break;
-					case PentaEnum: transientinput->AddTimeInput(new PentaInput(input_enum,&value,P0Enum)); break;
-					case TetraEnum: transientinput->AddTimeInput(new TetraInput(input_enum,&value,P0Enum)); break;
-					default: _error_("Not implemented yet");
-				}
-			}
-			datasetinput->AddInput(transientinput,input_id);
-			xDelete<IssmDouble>(times);
+			_error_("not supported");
+			///*create transient input: */
+			//IssmDouble* times = xNew<IssmDouble>(N);
+			//for(t=0;t<N;t++) times[t] = vector[(M-1)*N+t];
+			//TransientInput* transientinput=new TransientInput(input_enum,times,N);
+			//TriaInput* bof=NULL;
+			//for(t=0;t<N;t++){
+			//	value=vector[N*this->Sid()+t];
+			//	switch(this->ObjectEnum()){
+			//		case TriaEnum:  transientinput->AddTimeInput(new TriaInput( input_enum,&value,P0Enum)); break;
+			//		case PentaEnum: transientinput->AddTimeInput(new PentaInput(input_enum,&value,P0Enum)); break;
+			//		case TetraEnum: transientinput->AddTimeInput(new TetraInput(input_enum,&value,P0Enum)); break;
+			//		default: _error_("Not implemented yet");
+			//	}
+			//}
+			//xDelete<IssmDouble>(times);
 		}
 		else _error_("element vector is either numberofelements or numberofelements+1 long. Field provided (" << EnumToStringx(input_enum) << ") is " << M << " long");
 	}
 	else if(vector_type==3){ //element vector
-
-		/*For right now we are static */
-		if(M==iomodel->numberofelements){
-			/*create transient input: */
-			IssmDouble* layers = xNewZeroInit<IssmDouble>(N);;
-			for(t=0;t<N;t++) layers[t] = vector[N*this->Sid()+t];
-			DoubleArrayInput* arrayinput=new DoubleArrayInput(input_enum,layers,N);
-			datasetinput->AddInput(arrayinput,input_id);
-			xDelete<IssmDouble>(layers);
-		}
-		else _error_("element vector is either numberofelements or numberofelements+1 long. Field provided (" << EnumToStringx(input_enum) << ") is " << M << " long");
-	}
-	else _error_("Cannot add input for vector type " << vector_type << " (not supported)");
-}
-/*}}}*/
-void       Element::InputDuplicate(int original_enum,int new_enum){/*{{{*/
-
-	if(!IsInputEnum(original_enum)) _error_("Enum "<<EnumToStringx(original_enum)<<" is not in IsInput");
-
-	/*Call inputs method*/
-	this->inputs->DuplicateInput(original_enum,new_enum);
-
+		_error_("not supported");
+
+		///*For right now we are static */
+		//if(M==iomodel->numberofelements){
+		//	/*create transient input: */
+		//	IssmDouble* layers = xNewZeroInit<IssmDouble>(N);;
+		//	for(t=0;t<N;t++) layers[t] = vector[N*this->Sid()+t];
+		//	DoubleArrayInput* arrayinput=new DoubleArrayInput(input_enum,layers,N);
+		//	xDelete<IssmDouble>(layers);
+		//}
+		//else _error_("element vector is either numberofelements or numberofelements+1 long. Field provided (" << EnumToStringx(input_enum) << ") is " << M << " long");
+	}
+	else{
+		_error_("Cannot add input for vector type " << vector_type << " (not supported)");
+	}
 }
 /*}}}*/
@@ -2126,5 +1959,5 @@
 
 	/*update input*/
-	this->inputs->AddInput(new IntInput(name,constant));
+	this->SetIntInput(this->inputs2,name,constant);
 }
 /*}}}*/
@@ -2135,5 +1968,5 @@
 
 	/*update input*/
-	this->inputs->AddInput(new DoubleInput(name,constant));
+	this->SetElementInput(name,constant);
 }
 /*}}}*/
@@ -2144,7 +1977,13 @@
 
 	/*update input*/
-	this->inputs->AddInput(new BoolInput(name,constant));
-}
-/*}}}*/
+	this->SetBoolInput(this->inputs2,name,constant);
+}
+/*}}}*/
+bool       Element::IsOnSurface(){/*{{{*/
+	return this->isonsurface;
+}/*}}}*/
+bool       Element::IsOnBase(){/*{{{*/
+	return this->isonbase;
+}/*}}}*/
 bool       Element::IsFloating(){/*{{{*/
 
@@ -2153,14 +1992,16 @@
 	parameters->FindParam(&migration_style,GroundinglineMigrationEnum);
 
+	Input2* input = this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(input);
+
 	if(migration_style==SubelementMigrationEnum){ //Floating if all nodes are floating
-		if(this->inputs->Max(MaskGroundediceLevelsetEnum) <= 0.) shelf=true;
+		if(input->GetInputMax() <= 0.) shelf=true;
 		else shelf=false;
 	}
 	else if(migration_style==ContactEnum){
-		if(this->inputs->Min(MaskGroundediceLevelsetEnum) < 0.) shelf=true;
+		if(input->GetInputMin() < 0.) shelf=true;
 		else shelf=false;
 	}
 	else if(migration_style==NoneEnum || migration_style==AggressiveMigrationEnum || migration_style==SoftMigrationEnum || migration_style==GroundingOnlyEnum){ //Floating if all nodes are floating
-		if(this->inputs->Min(MaskGroundediceLevelsetEnum) > 0.) shelf=false;
+		if(input->GetInputMin() > 0.) shelf=false;
 		else shelf=true;
 	}
@@ -2171,5 +2012,6 @@
 bool       Element::IsGrounded(){/*{{{*/
 
-	if(this->inputs->Max(MaskGroundediceLevelsetEnum) > 0.){
+	Input2* input=this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(input);
+	if(input->GetInputMax() > 0.){
 		return true;
 	}
@@ -2179,9 +2021,21 @@
 }/*}}}*/
 bool       Element::IsIceInElement(){/*{{{*/
-	return (this->inputs->Min(MaskIceLevelsetEnum)<0.);
+	Input2* input=this->GetInput2(MaskIceLevelsetEnum); _assert_(input);
+	return (input->GetInputMin()<0.);
+}
+/*}}}*/
+bool       Element::IsIceOnlyInElement(){/*{{{*/
+	Input2* input=this->GetInput2(MaskIceLevelsetEnum); _assert_(input);
+	return (input->GetInputMax()<0.);
 }
 /*}}}*/
 bool       Element::IsLandInElement(){/*{{{*/
-	return (this->inputs->Max(MaskLandLevelsetEnum)>0.);
+	Input2* input=this->GetInput2(MaskLandLevelsetEnum); _assert_(input);
+	return (input->GetInputMax()>0.);
+}
+/*}}}*/
+bool       Element::IsOceanInElement(){/*{{{*/
+	Input2* input=this->GetInput2(MaskOceanLevelsetEnum); _assert_(input);
+	return (input->GetInputMax()>0.);
 }
 /*}}}*/
@@ -2211,5 +2065,5 @@
 
 	/* Get parameters and inputs */
-	this->inputs->GetInputValue(&basinid,BasalforcingsIsmip6BasinIdEnum);
+	this->GetInputValue(&basinid,BasalforcingsIsmip6BasinIdEnum);
 	this->parameters->FindParam(&num_basins,BasalforcingsIsmip6NumBasinsEnum);
 	this->parameters->FindParam(&gamma0,BasalforcingsIsmip6Gamma0Enum);
@@ -2219,6 +2073,6 @@
 		this->parameters->FindParam(&mean_tf,&N,BasalforcingsIsmip6AverageTfEnum); _assert_(N==num_basins);
 	}
-	Input* tf_input = this->GetInput(BasalforcingsIsmip6TfShelfEnum);              _assert_(tf_input);
-	Input* meltanomaly_input = this->GetInput(BasalforcingsIsmip6MeltAnomalyEnum); _assert_(meltanomaly_input);
+	Input2* tf_input = this->GetInput2(BasalforcingsIsmip6TfShelfEnum);              _assert_(tf_input);
+	Input2* meltanomaly_input = this->GetInput2(BasalforcingsIsmip6MeltAnomalyEnum); _assert_(meltanomaly_input);
 	delta_t_basin = delta_t[basinid];
 	if(!islocal) mean_tf_basin = mean_tf[basinid];
@@ -2241,5 +2095,5 @@
 
 	/*Return basal melt rate*/
-	this->AddInput(BasalforcingsFloatingiceMeltingRateEnum,basalmeltrate,P1Enum);
+	this->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,basalmeltrate,P1DGEnum);
 
 	/*Cleanup and return*/
@@ -2252,5 +2106,6 @@
 }/*}}}*/
 bool       Element::IsWaterInElement(){/*{{{*/
-	return (this->inputs->Max(MaskOceanLevelsetEnum)>0.);
+	Input2* input=this->GetInput2(MaskOceanLevelsetEnum); _assert_(input);
+	return (input->GetInputMax()>0.);
 }
 /*}}}*/
@@ -2260,6 +2115,7 @@
 
 	IssmDouble  deepwaterel,upperwaterel,deepwatermelt,upperwatermelt;
-	IssmDouble* base     		= xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble* values   		= xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble *base         = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble *values       = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble *perturbation = xNew<IssmDouble>(NUM_VERTICES);
 	IssmDouble 	time;
 
@@ -2272,4 +2128,5 @@
 
 	this->GetInputListOnVertices(base,BaseEnum);
+   this->GetInputListOnVertices(perturbation,BasalforcingsPerturbationMeltingRateEnum);
 	for(int i=0;i<NUM_VERTICES;i++){
 		if(base[i]>=upperwaterel){
@@ -2280,10 +2137,14 @@
 		}
 		else{
-			values[i]=deepwatermelt*(base[i]-upperwaterel)/(deepwaterel-upperwaterel);
-		}
-	}
-
-	this->AddInput(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
+			IssmDouble alpha = (base[i]-upperwaterel)/(deepwaterel-upperwaterel);
+			values[i]=deepwatermelt*alpha+(1.-alpha)*upperwatermelt;
+		}
+
+      values[i]+=perturbation[i];
+	}
+
+	this->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
 	xDelete<IssmDouble>(base);
+   xDelete<IssmDouble>(perturbation);
 	xDelete<IssmDouble>(values);
 
@@ -2293,9 +2154,9 @@
 	const int NUM_VERTICES = this->GetNumberOfVertices();
 
-	IssmDouble* deepwatermelt	= xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble* deepwaterel     = xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble* upperwaterel	= xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble* base			= xNew<IssmDouble>(NUM_VERTICES);
-	IssmDouble* values			= xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble *deepwatermelt = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble *deepwaterel   = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble *upperwaterel  = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble *base          = xNew<IssmDouble>(NUM_VERTICES);
+	IssmDouble *values        = xNew<IssmDouble>(NUM_VERTICES);
 
 	this->GetInputListOnVertices(base,BaseEnum);
@@ -2310,5 +2171,5 @@
 	}
 
-	this->AddInput(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
+	this->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
 	xDelete<IssmDouble>(base);
 	xDelete<IssmDouble>(deepwaterel);
@@ -2360,5 +2221,5 @@
 	}
 
-	this->AddInput(BasalforcingsGeothermalfluxEnum,values,P1Enum);
+	this->AddInput2(BasalforcingsGeothermalfluxEnum,values,P1Enum);
 	xDelete<IssmDouble>(xyz_list);
 	xDelete<IssmDouble>(values);
@@ -2369,5 +2230,4 @@
 	_assert_(this);
 	if(marshall_direction==MARSHALLING_BACKWARD){
-		inputs=new Inputs();
 		nodes = NULL;
 	}
@@ -2377,8 +2237,7 @@
 	MARSHALLING(id);
 	MARSHALLING(sid);
+	MARSHALLING(lid);
 	MARSHALLING(element_type);
 	MARSHALLING_DYNAMIC(element_type_list,int,numanalyses);
-	inputs->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-
 }
 /*}}}*/
@@ -2459,9 +2318,9 @@
 		}
 	}
-	this->AddInput(MaskGroundediceLevelsetEnum,&phi[0],P1Enum);
+	this->AddInput2(MaskGroundediceLevelsetEnum,&phi[0],P1Enum);
 
 	/*Update inputs*/
-	this->AddInput(SurfaceEnum,&s[0],P1Enum);
-	this->AddInput(BaseEnum,&b[0],P1Enum);
+	this->AddInput2(SurfaceEnum,&s[0],P1Enum);
+	this->AddInput2(BaseEnum,&b[0],P1Enum);
 
 	/*Delete*/
@@ -2499,5 +2358,5 @@
 	}
 
-	this->AddInput(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
+	this->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
 	xDelete<IssmDouble>(base);
 	xDelete<IssmDouble>(bed);
@@ -2538,5 +2397,5 @@
 	}
 
-	this->AddInput(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
+	this->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
 	xDelete<IssmDouble>(base);
 	xDelete<IssmDouble>(values);
@@ -2553,4 +2412,5 @@
 
 	int i;
+	int*        vertexlids=xNew<int>(NUM_VERTICES);
 	IssmDouble* monthlytemperatures=xNew<IssmDouble>(NUM_VERTICES_MONTHS_PER_YEAR);
 	IssmDouble* monthlyprec=xNew<IssmDouble>(NUM_VERTICES_MONTHS_PER_YEAR);
@@ -2561,14 +2421,18 @@
 	IssmDouble* tmp=xNew<IssmDouble>(NUM_VERTICES);
 	IssmDouble TdiffTime,PfacTime;
+
+	/*Recover parameters*/
 	IssmDouble time,yts,time_yr;
 	this->parameters->FindParam(&time,TimeEnum);
 	this->parameters->FindParam(&yts,ConstantsYtsEnum);
+	this->GetVerticesLidList(vertexlids);
 	time_yr=floor(time/yts)*yts;
 
 	/*Recover present day temperature and precipitation*/
-	Input*     input=this->inputs->GetInput(SmbTemperaturesPresentdayEnum);    _assert_(input);
-	Input*     input2=this->inputs->GetInput(SmbTemperaturesLgmEnum);          _assert_(input2);
-	Input*     input3=this->inputs->GetInput(SmbPrecipitationsPresentdayEnum); _assert_(input3);
-	Input*     input4=this->inputs->GetInput(SmbPrecipitationsLgmEnum);        _assert_(input4);
+	DatasetInput2* dinput1=this->GetDatasetInput2(SmbTemperaturesPresentdayEnum);   _assert_(dinput1);
+	DatasetInput2* dinput2=this->GetDatasetInput2(SmbTemperaturesLgmEnum);          _assert_(dinput2);
+	DatasetInput2* dinput3=this->GetDatasetInput2(SmbPrecipitationsPresentdayEnum); _assert_(dinput3);
+	DatasetInput2* dinput4=this->GetDatasetInput2(SmbPrecipitationsLgmEnum);        _assert_(dinput4);
+
 	/*loop over vertices: */
 	Gauss* gauss=this->NewGauss();
@@ -2576,8 +2440,8 @@
 		for(int iv=0;iv<NUM_VERTICES;iv++) {
 			gauss->GaussVertex(iv);
-			input->GetInputValue(&TemperaturesPresentday[iv*12+month],gauss,month/12.*yts);
-			input2->GetInputValue(&TemperaturesLgm[iv*12+month],gauss,month/12.*yts);
-			input3->GetInputValue(&PrecipitationsPresentday[iv*12+month],gauss,month/12.*yts);
-			input4->GetInputValue(&PrecipitationsLgm[iv*12+month],gauss,month/12.*yts);
+			dinput1->GetInputValue(&TemperaturesPresentday[iv*12+month],gauss,month);
+			dinput2->GetInputValue(&TemperaturesLgm[iv*12+month],gauss,month);
+			dinput3->GetInputValue(&PrecipitationsPresentday[iv*12+month],gauss,month);
+			dinput4->GetInputValue(&PrecipitationsLgm[iv*12+month],gauss,month);
 
 			PrecipitationsPresentday[iv*12+month]=PrecipitationsPresentday[iv*12+month]*yts;
@@ -2599,27 +2463,18 @@
 
 	/*Update inputs*/
-	TransientInput* NewTemperatureInput = new TransientInput(SmbMonthlytemperaturesEnum);
-	TransientInput* NewPrecipitationInput = new TransientInput(SmbPrecipitationEnum);
 	for (int imonth=0;imonth<12;imonth++) {
 		for(i=0;i<NUM_VERTICES;i++) tmp[i]=monthlytemperatures[i*12+imonth];
 		switch(this->ObjectEnum()){
-			case TriaEnum:  NewTemperatureInput->AddTimeInput(new TriaInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case PentaEnum: NewTemperatureInput->AddTimeInput(new PentaInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case TetraEnum: NewTemperatureInput->AddTimeInput(new TetraInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
+			case TriaEnum:  this->inputs2->SetTriaDatasetInput(SmbMonthlytemperaturesEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
+			case PentaEnum: this->inputs2->SetPentaDatasetInput(SmbMonthlytemperaturesEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
 			default: _error_("Not implemented yet");
 		}
 		for(i=0;i<NUM_VERTICES;i++) tmp[i]=monthlyprec[i*12+imonth]/yts;
 		switch(this->ObjectEnum()){
-			case TriaEnum:  NewPrecipitationInput->AddTimeInput(new TriaInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case PentaEnum: NewPrecipitationInput->AddTimeInput(new PentaInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
-			case TetraEnum: NewPrecipitationInput->AddTimeInput(new TetraInput(SmbPrecipitationEnum,&tmp[0],P1Enum),time_yr+imonth/12.*yts); break;
+			case TriaEnum:  this->inputs2->SetTriaDatasetInput(SmbPrecipitationEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
+			case PentaEnum: this->inputs2->SetPentaDatasetInput(SmbPrecipitationEnum,imonth,P1Enum,NUM_VERTICES,vertexlids,tmp); break;
 			default: _error_("Not implemented yet");
 		}
 	}
-	NewTemperatureInput->Configure(this->parameters);
-	NewPrecipitationInput->Configure(this->parameters);
-
-	this->inputs->AddInput(NewTemperatureInput);
-	this->inputs->AddInput(NewPrecipitationInput);
 
 	switch(this->ObjectEnum()){
@@ -2627,6 +2482,6 @@
 		case PentaEnum:
 		case TetraEnum:
-							this->InputExtrude(SmbMonthlytemperaturesEnum,-1);
-							this->InputExtrude(SmbPrecipitationEnum,-1);
+							this->DatasetInputExtrude(SmbMonthlytemperaturesEnum,-1);
+							this->DatasetInputExtrude(SmbPrecipitationEnum,-1);
 							break;
 		default: _error_("Not implemented yet");
@@ -2642,4 +2497,5 @@
 	xDelete<IssmDouble>(PrecipitationsLgm);
 	xDelete<IssmDouble>(tmp);
+	xDelete<int>(vertexlids);
 
 }
@@ -2664,9 +2520,9 @@
 	IssmDouble dist_gl,dist_cf;
 
-	inputs->GetInputValue(&basin_id,BasalforcingsPicoBasinIdEnum);
+	this->GetInputValue(&basin_id,BasalforcingsPicoBasinIdEnum);
 	IssmDouble boxid_max=reCast<IssmDouble>(max_boxid_basin_list[basin_id])+1.;
 
-	Input* dist_gl_input=inputs->GetInput(DistanceToGroundinglineEnum); _assert_(dist_gl_input);
-	Input* dist_cf_input=inputs->GetInput(DistanceToCalvingfrontEnum);  _assert_(dist_cf_input);
+	Input2* dist_gl_input=this->GetInput2(DistanceToGroundinglineEnum); _assert_(dist_gl_input);
+	Input2* dist_cf_input=this->GetInput2(DistanceToCalvingfrontEnum);  _assert_(dist_cf_input);
 
 	/*Get dist_gl and dist_cf at center of element*/
@@ -2695,5 +2551,5 @@
 	if(boxid==-1) _error_("No boxid found for element " << this->Sid() << "!");
 
-	this->inputs->AddInput(new IntInput(BasalforcingsPicoBoxIdEnum, boxid));
+	this->SetIntInput(this->inputs2,BasalforcingsPicoBoxIdEnum, boxid);
 
 }/*}}}*/
@@ -2703,5 +2559,5 @@
 
 	int boxid;
-	this->inputs->GetInputValue(&boxid,BasalforcingsPicoBoxIdEnum);
+	this->GetInputValue(&boxid,BasalforcingsPicoBoxIdEnum);
 	if(loopboxid!=boxid) return;
 
@@ -2731,9 +2587,8 @@
 	this->parameters->FindParam(&num_basins, BasalforcingsPicoNumBasinsEnum);
 	this->parameters->FindParam(&gamma_T,BasalforcingsPicoGammaTEnum);
-	this->parameters->FindParam(&overturning_coeff,BasalforcingsPicoOverturningCoeffEnum);
 	this->parameters->FindParam(&maxbox,BasalforcingsPicoMaxboxcountEnum);
-	this->inputs->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
+	this->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
 	this->parameters->FindParam(&isplume, BasalforcingsPicoIsplumeEnum);
-	Input* thickness_input=this->GetInput(ThicknessEnum); _assert_(thickness_input);
+	Input2 *thickness_input    = this->GetInput2(ThicknessEnum);                         _assert_(thickness_input);
 	_assert_(basinid<=num_basins);
 
@@ -2759,4 +2614,5 @@
 		IssmDouble 	s1 				= soc_farocean/(nu*lambda);
 		IssmDouble* overturnings 	= xNew<IssmDouble>(NUM_VERTICES);
+		Input2 *overturningC_input = this->GetInput2(BasalforcingsPicoOverturningCoeffEnum); _assert_(overturningC_input);
 
 		/* Start looping on the number of verticies and calculate ocean vars */
@@ -2765,4 +2621,5 @@
 			gauss->GaussVertex(i);
 			thickness_input->GetInputValue(&thickness,gauss);
+			overturningC_input->GetInputValue(&overturning_coeff,gauss);
 			pressure = (rhoi*earth_grav*1e-4)*thickness;
 			T_star   = a*soc_farocean+b-c*pressure-toc_farocean;
@@ -2780,8 +2637,8 @@
 		}
 
-		if(!isplume) this->AddInput(BasalforcingsFloatingiceMeltingRateEnum,basalmeltrates_shelf,P1Enum);
-		this->AddInput(BasalforcingsPicoSubShelfOceanTempEnum,Tocs,P1Enum);
-		this->AddInput(BasalforcingsPicoSubShelfOceanSalinityEnum,Socs,P1Enum);
-		this->AddInput(BasalforcingsPicoSubShelfOceanOverturningEnum,overturnings,P1Enum);
+		if(!isplume) this->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,basalmeltrates_shelf,P1DGEnum);
+		this->AddInput2(BasalforcingsPicoSubShelfOceanTempEnum,Tocs,P1DGEnum);
+		this->AddInput2(BasalforcingsPicoSubShelfOceanSalinityEnum,Socs,P1DGEnum);
+		this->AddInput2(BasalforcingsPicoSubShelfOceanOverturningEnum,overturnings,P1DGEnum);
 
 		/*Cleanup and return*/
@@ -2816,7 +2673,7 @@
 		}
 
-		if(!isplume) this->AddInput(BasalforcingsFloatingiceMeltingRateEnum,basalmeltrates_shelf,P1Enum);
-		this->AddInput(BasalforcingsPicoSubShelfOceanTempEnum,Tocs,P1Enum);
-		this->AddInput(BasalforcingsPicoSubShelfOceanSalinityEnum,Socs,P1Enum);
+		if(!isplume) this->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,basalmeltrates_shelf,P1DGEnum);
+		this->AddInput2(BasalforcingsPicoSubShelfOceanTempEnum,Tocs,P1DGEnum);
+		this->AddInput2(BasalforcingsPicoSubShelfOceanSalinityEnum,Socs,P1DGEnum);
 
 		/*Cleanup and return*/
@@ -2874,10 +2731,10 @@
 
 	/*Get inputs*/
-	Input* zgl_input            = this->GetInput(GroundinglineHeightEnum);                     _assert_(zgl_input);
-	Input* toc_input            = this->GetInput(BasalforcingsPicoSubShelfOceanTempEnum);      _assert_(toc_input);
-	Input* soc_input            = this->GetInput(BasalforcingsPicoSubShelfOceanSalinityEnum);  _assert_(soc_input);
-	Input* base_input           = this->GetInput(BaseEnum);                                    _assert_(base_input);
-	Input* baseslopex_input     = this->GetInput(BaseSlopeXEnum);                              _assert_(baseslopex_input);
-	Input* baseslopey_input     = this->GetInput(BaseSlopeYEnum);                              _assert_(baseslopey_input);
+	Input2* zgl_input         = this->GetInput2(GroundinglineHeightEnum);                     _assert_(zgl_input);
+	Input2* toc_input         = this->GetInput2(BasalforcingsPicoSubShelfOceanTempEnum);      _assert_(toc_input);
+	Input2* soc_input         = this->GetInput2(BasalforcingsPicoSubShelfOceanSalinityEnum);  _assert_(soc_input);
+	Input2* base_input        = this->GetInput2(BaseEnum);                                    _assert_(base_input);
+	Input2* baseslopex_input  = this->GetInput2(BaseSlopeXEnum);                              _assert_(baseslopex_input);
+	Input2* baseslopey_input  = this->GetInput2(BaseSlopeYEnum);                              _assert_(baseslopey_input);
 	this->FindParam(&yts, ConstantsYtsEnum);
 
@@ -2929,5 +2786,5 @@
 
 	/*Save computed melt-rate*/
-	this->AddInput(BasalforcingsFloatingiceMeltingRateEnum,basalmeltrates_shelf,P1Enum);
+	this->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,basalmeltrates_shelf,P1DGEnum);
 
 	/*Cleanup and return*/
@@ -2940,5 +2797,5 @@
 	const int NUM_VERTICES_MONTHS_PER_YEAR = NUM_VERTICES * 12;
 
-	int  		i;
+	int  		i,vertexlids[MAXVERTICES];
 	IssmDouble* agd=xNew<IssmDouble>(NUM_VERTICES); // surface mass balance
 	IssmDouble* melt=xNew<IssmDouble>(NUM_VERTICES); // surface mass balance
@@ -2958,4 +2815,7 @@
 	IssmDouble mavg=1./12.; //factor for monthly average
 
+	/*Get vertex Lids for later*/
+	this->GetVerticesLidList(&vertexlids[0]);
+
 	/*Get material parameters :*/
 	rho_water=this->FindParam(MaterialsRhoSeawaterEnum);
@@ -2967,7 +2827,4 @@
 	rlapslgm=this->FindParam(SmbRlapslgmEnum);
 
-	/*Recover monthly temperatures and precipitation and compute the yearly mean temperatures*/
-	Input*     input=this->inputs->GetInput(SmbMonthlytemperaturesEnum); _assert_(input);
-	Input*     input2=this->inputs->GetInput(SmbPrecipitationEnum); _assert_(input2);
 	IssmDouble time,yts,time_yr;
 	this->parameters->FindParam(&time,TimeEnum);
@@ -2975,13 +2832,19 @@
 	time_yr=floor(time/yts)*yts;
 
+	/*Get inputs*/
+	DatasetInput2* dinput =this->GetDatasetInput2(SmbMonthlytemperaturesEnum); _assert_(dinput);
+	DatasetInput2* dinput2=this->GetDatasetInput2(SmbPrecipitationEnum);       _assert_(dinput2);
+
 	/*loop over vertices: */
 	Gauss* gauss=this->NewGauss();
 	for(int month=0;month<12;month++) {
+		/*Recover monthly temperatures and precipitation and compute the yearly mean temperatures*/
+
 		for(int iv=0;iv<NUM_VERTICES;iv++) {
 			gauss->GaussVertex(iv);
-			input->GetInputValue(&monthlytemperatures[iv*12+month],gauss,time_yr+month/12.*yts);
+			dinput->GetInputValue(&monthlytemperatures[iv*12+month],gauss,month);
 			// yearlytemperatures[iv]=yearlytemperatures[iv]+monthlytemperatures[iv*12+month]*mavg; // Has to be in Kelvin
 			monthlytemperatures[iv*12+month]=monthlytemperatures[iv*12+month]-273.15; // conversion from Kelvin to celcius for PDD module
-			input2->GetInputValue(&monthlyprec[iv*12+month],gauss,time_yr+month/12.*yts);
+			dinput2->GetInputValue(&monthlyprec[iv*12+month],gauss,month);
 			monthlyprec[iv*12+month]=monthlyprec[iv*12+month]*yts;
 		}
@@ -3003,7 +2866,9 @@
 	 *     This parameter is set, if the user wants to define the
 	 *     pdd factors regionally, if issetpddfac==1 in the d18opdd method */
-	if (issetpddfac==1){
-		input=this->GetInput(SmbPddfacSnowEnum); _assert_(input);
-		input2=this->GetInput(SmbPddfacIceEnum); _assert_(input2);
+	Input2* input  = NULL;
+	Input2* input2 = NULL;
+	if(issetpddfac==1){
+		input  = this->GetInput2(SmbPddfacSnowEnum); _assert_(input);
+		input2 = this->GetInput2(SmbPddfacIceEnum); _assert_(input2);
 	}
 
@@ -3015,5 +2880,5 @@
 
 	/*measure the surface mass balance*/
-	for (int iv = 0; iv<NUM_VERTICES; iv++){
+	for(int iv = 0; iv<NUM_VERTICES; iv++){
 		gauss->GaussVertex(iv);
 		pddsnowfac=0.;
@@ -3034,25 +2899,10 @@
 
 	/*Update inputs*/
-	// TransientInput* NewTemperatureInput = new TransientInput(SmbMonthlytemperaturesEnum);
-	// TransientInput* NewPrecipitationInput = new TransientInput(SmbPrecipitationEnum);
-	// for (int imonth=0;imonth<12;imonth++) {
-	//   for(i=0;i<NUM_VERTICES;i++) tmp[i]=monthlytemperatures[i*12+imonth];
-	//   TriaInput* newmonthinput1 = new TriaInput(SmbMonthlytemperaturesEnum,&tmp[0],P1Enum);
-	//   NewTemperatureInput->AddTimeInput(newmonthinput1,time+imonth/12.*yts);
-	//
-	//   for(i=0;i<NUM_VERTICES;i++) tmp[i]=monthlyprec[i*12+imonth]/yts;
-	//   TriaInput* newmonthinput2 = new TriaInput(SmbPrecipitationEnum,&tmp[0],P1Enum);
-	//   NewPrecipitationInput->AddTimeInput(newmonthinput2,time+imonth/12.*yts);
-	// }
-	// NewTemperatureInput->Configure(this->parameters);
-	// NewPrecipitationInput->Configure(this->parameters);
-
 	switch(this->ObjectEnum()){
 		case TriaEnum:
-			// this->inputs->AddInput(new TriaInput(TemperatureEnum,&yearlytemperatures[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(SmbMassBalanceEnum,&agd[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(SmbAccumulationEnum,&accu[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(SmbMeltEnum,&melt[0],P1Enum));
+			this->AddInput2(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum);
+			this->AddInput2(SmbMassBalanceEnum,&agd[0],P1Enum);
+			this->AddInput2(SmbAccumulationEnum,&accu[0],P1Enum);
+			this->AddInput2(SmbMeltEnum,&melt[0],P1Enum);
 			break;
 		case PentaEnum:
@@ -3061,9 +2911,23 @@
 				 * the temperatures as they are for the base of the penta and
 				 * yse yearlytemperatures for the top*/
-				GetInputListOnVertices(&s[0],TemperatureEnum);
-				yearlytemperatures[0] = s[0];
-				yearlytemperatures[1] = s[1];
-				yearlytemperatures[2] = s[2];
-				this->inputs->AddInput(new PentaInput(TemperatureEnum,&yearlytemperatures[0],P1Enum));
+				PentaInput2* temp_input = xDynamicCast<PentaInput2*>(this->GetInput2(TemperatureEnum)); _assert_(temp_input);
+				switch(temp_input->GetInputInterpolationType()){
+					case P1Enum:
+						temp_input->element_values[3] = yearlytemperatures[3];
+						temp_input->element_values[4] = yearlytemperatures[4];
+						temp_input->element_values[5] = yearlytemperatures[5];
+						temp_input->SetInput(P1Enum,NUM_VERTICES,&vertexlids[0],temp_input->element_values);
+						break;
+					case P1xP2Enum:
+					case P1xP3Enum:
+					case P1xP4Enum:
+						temp_input->element_values[3] = yearlytemperatures[3];
+						temp_input->element_values[4] = yearlytemperatures[4];
+						temp_input->element_values[5] = yearlytemperatures[5];
+						temp_input->SetInput(temp_input->GetInputInterpolationType(),this->lid,this->GetNumberOfNodes(temp_input->GetInputInterpolationType()),temp_input->element_values);
+						break;
+					default:
+						_error_("Interpolation "<<EnumToStringx(temp_input->GetInputInterpolationType())<<" not supported yet");
+				}
 
 				bool isenthalpy;
@@ -3071,27 +2935,27 @@
 				if(isenthalpy){
 					/*Convert that to enthalpy for the enthalpy model*/
-					IssmDouble enthalpy[6];
-					GetInputListOnVertices(&enthalpy[0],EnthalpyEnum);
-					ThermalToEnthalpy(&enthalpy[3],yearlytemperatures[3],0.,0.);
-					ThermalToEnthalpy(&enthalpy[4],yearlytemperatures[4],0.,0.);
-					ThermalToEnthalpy(&enthalpy[5],yearlytemperatures[5],0.,0.);
-					this->inputs->AddInput(new PentaInput(EnthalpyEnum,&enthalpy[0],P1Enum));
+					PentaInput2* enth_input = xDynamicCast<PentaInput2*>(this->GetInput2(EnthalpyEnum)); _assert_(enth_input);
+					switch(enth_input->GetInputInterpolationType()){
+						case P1Enum:
+							ThermalToEnthalpy(&enth_input->element_values[3],yearlytemperatures[3],0.,0.);
+							ThermalToEnthalpy(&enth_input->element_values[4],yearlytemperatures[4],0.,0.);
+							ThermalToEnthalpy(&enth_input->element_values[5],yearlytemperatures[5],0.,0.);
+							enth_input->SetInput(P1Enum,NUM_VERTICES,&vertexlids[0],enth_input->element_values);
+							break;
+						case P1xP2Enum:
+						case P1xP3Enum:
+						case P1xP4Enum:
+							ThermalToEnthalpy(&enth_input->element_values[3],yearlytemperatures[3],0.,0.);
+							ThermalToEnthalpy(&enth_input->element_values[4],yearlytemperatures[4],0.,0.);
+							ThermalToEnthalpy(&enth_input->element_values[5],yearlytemperatures[5],0.,0.);
+							enth_input->SetInput(enth_input->GetInputInterpolationType(),this->lid,this->GetNumberOfNodes(enth_input->GetInputInterpolationType()),enth_input->element_values);
+							break;
+						default:
+							_error_("Interpolation "<<EnumToStringx(enth_input->GetInputInterpolationType())<<" not supported yet");
+					}
 				}
 			}
-			this->inputs->AddInput(new PentaInput(SmbMassBalanceEnum,&agd[0],P1Enum));
-			this->inputs->AddInput(new PentaInput(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum));
-			this->InputExtrude(TemperaturePDDEnum,-1);
-			this->InputExtrude(SmbMassBalanceEnum,-1);
-			break;
-		case TetraEnum:
-			if(IsOnSurface()){
-				GetInputListOnVertices(&s[0],TemperatureEnum);
-				yearlytemperatures[0] = s[0];
-				yearlytemperatures[1] = s[1];
-				yearlytemperatures[2] = s[2];
-				this->inputs->AddInput(new TetraInput(TemperatureEnum,&yearlytemperatures[0],P1Enum));
-			}
-			this->inputs->AddInput(new TetraInput(SmbMassBalanceEnum,&agd[0],P1Enum));
-			this->inputs->AddInput(new TetraInput(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum));
+			this->AddInput2(SmbMassBalanceEnum,&agd[0],P1Enum);
+			this->AddInput2(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum);
 			this->InputExtrude(TemperaturePDDEnum,-1);
 			this->InputExtrude(SmbMassBalanceEnum,-1);
@@ -3099,11 +2963,4 @@
 		default: _error_("Not implemented yet");
 	}
-	// this->inputs->AddInput(NewTemperatureInput);
-	// this->inputs->AddInput(NewPrecipitationInput);
-	// this->inputs->AddInput(new TriaVertexInput(ThermalSpcTemperatureEnum,&Tsurf[0]));
-
-	//this->InputExtrude(SmbMassBalanceEnum,-1);
-	// this->InputExtrude(SmbMonthlytemperaturesEnum,-1);
-	// this->InputExtrude(SmbPrecipitationEnum,-1);
 
 	/*clean-up*/
@@ -3120,5 +2977,4 @@
 	xDelete<IssmDouble>(s0p);
 	xDelete<IssmDouble>(tmp);
-
 }
 /*}}}*/
@@ -3130,5 +2986,5 @@
 	const int NUM_VERTICES_MONTHS_PER_YEAR	= NUM_VERTICES * 12;
 
-	int        	i;
+	int        	i,vertexlids[MAXVERTICES];
 	IssmDouble* smb=xNew<IssmDouble>(NUM_VERTICES);		// surface mass balance
 	IssmDouble* melt=xNew<IssmDouble>(NUM_VERTICES);		// melting comp. of surface mass balance
@@ -3148,4 +3004,7 @@
 	IssmDouble time,yts,time_yr;
 
+	/*Get vertex Lids for later*/
+	this->GetVerticesLidList(&vertexlids[0]);
+
 	/*Get material parameters :*/
 	rho_water=this->FindParam(MaterialsRhoSeawaterEnum);
@@ -3155,10 +3014,4 @@
 	desfac=this->FindParam(SmbDesfacEnum);
 	rlaps=this->FindParam(SmbRlapsEnum);
-
-	/*Recover monthly temperatures and precipitation*/
-	Input*     input=this->inputs->GetInput(SmbMonthlytemperaturesEnum); _assert_(input);
-	Input*     input2=this->inputs->GetInput(SmbPrecipitationEnum); _assert_(input2);
-	/*Recover smb correction term */
-	Input*     input3=this->inputs->GetInput(SmbSmbCorrEnum); _assert_(input3);
 
 	/* Get time */
@@ -3171,12 +3024,17 @@
 	IssmDouble mu           = MU_0*(1000.0*86400.0)*(rho_ice/rho_water);   // (d*deg C)/(mm WE) --> (s*deg C)/(m IE)
 
+	/*Get inputs*/
+	DatasetInput2* dinput =this->GetDatasetInput2(SmbMonthlytemperaturesEnum); _assert_(dinput);
+	DatasetInput2* dinput2=this->GetDatasetInput2(SmbPrecipitationEnum);       _assert_(dinput2);
+
 	/*loop over vertices: */
 	Gauss* gauss=this->NewGauss();
 	for(int month=0;month<12;month++){
+
 		for(int iv=0;iv<NUM_VERTICES;iv++){
 			gauss->GaussVertex(iv);
-			input->GetInputValue(&monthlytemperatures[iv*12+month],gauss,(month+1)/12.*yts);
+			dinput->GetInputValue(&monthlytemperatures[iv*12+month],gauss,month);
 			monthlytemperatures[iv*12+month]=monthlytemperatures[iv*12+month]-273.15; // conversion from Kelvin to celcius for PDD module
-			input2->GetInputValue(&monthlyprec[iv*12+month],gauss,(month+1)/12.*yts);
+			dinput2->GetInputValue(&monthlyprec[iv*12+month],gauss,month);
 			monthlyprec[iv*12+month]=monthlyprec[iv*12+month]*yts;
 		}
@@ -3215,9 +3073,9 @@
 	switch(this->ObjectEnum()){
 		case TriaEnum:
-			// this->inputs->AddInput(new TriaInput(TemperatureEnum,&yearlytemperatures[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(SmbMassBalanceEnum,&smb[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(SmbAccumulationEnum,&accu[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(SmbMeltEnum,&melt[0],P1Enum));
+			//this->AddInput2(TemperatureEnum,&yearlytemperatures[0],P1Enum);
+			this->AddInput2(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum);
+			this->AddInput2(SmbMassBalanceEnum,&smb[0],P1Enum);
+			this->AddInput2(SmbAccumulationEnum,&accu[0],P1Enum);
+			this->AddInput2(SmbMeltEnum,&melt[0],P1Enum);
 			break;
 		case PentaEnum:
@@ -3231,41 +3089,60 @@
 					 * the temperatures as they are for the base of the penta and
 					 * use yearlytemperatures for the top*/
-					GetInputListOnVertices(&s[0],TemperatureEnum);
-					yearlytemperatures[0] = s[0];
-					yearlytemperatures[1] = s[1];
-					yearlytemperatures[2] = s[2];
-					this->inputs->AddInput(new PentaInput(TemperatureEnum,&yearlytemperatures[0],P1Enum));
+
+					/*FIXME: look at other function Element::PositiveDegreeDay and propagate change! Just assert for now*/
+					PentaInput2* temp_input = xDynamicCast<PentaInput2*>(this->GetInput2(TemperatureEnum)); _assert_(temp_input);
+					switch(temp_input->GetInputInterpolationType()){
+						case P1Enum:
+							temp_input->element_values[3] = yearlytemperatures[3];
+							temp_input->element_values[4] = yearlytemperatures[4];
+							temp_input->element_values[5] = yearlytemperatures[5];
+							temp_input->SetInput(P1Enum,NUM_VERTICES,&vertexlids[0],temp_input->element_values);
+							break;
+						case P1DGEnum:
+						case P1xP2Enum:
+						case P1xP3Enum:
+						case P1xP4Enum:
+							temp_input->element_values[3] = yearlytemperatures[3];
+							temp_input->element_values[4] = yearlytemperatures[4];
+							temp_input->element_values[5] = yearlytemperatures[5];
+							temp_input->SetInput(temp_input->GetInputInterpolationType(),this->lid,this->GetNumberOfNodes(temp_input->GetInputInterpolationType()),temp_input->element_values);
+							break;
+						default:
+							_error_("Interpolation "<<EnumToStringx(temp_input->GetInputInterpolationType())<<" not supported yet");
+					}
+
 					if(isenthalpy){
 						/*Convert that to enthalpy for the enthalpy model*/
-						IssmDouble enthalpy[6];
-						GetInputListOnVertices(&enthalpy[0],EnthalpyEnum);
-						ThermalToEnthalpy(&enthalpy[3],yearlytemperatures[3],0.,0.);
-						ThermalToEnthalpy(&enthalpy[4],yearlytemperatures[4],0.,0.);
-						ThermalToEnthalpy(&enthalpy[5],yearlytemperatures[5],0.,0.);
-						this->inputs->AddInput(new PentaInput(EnthalpyEnum,&enthalpy[0],P1Enum));
+						PentaInput2* enth_input = xDynamicCast<PentaInput2*>(this->GetInput2(EnthalpyEnum)); _assert_(enth_input);
+						switch(enth_input->GetInputInterpolationType()){
+							case P1Enum:
+								ThermalToEnthalpy(&enth_input->element_values[3],yearlytemperatures[3],0.,0.);
+								ThermalToEnthalpy(&enth_input->element_values[4],yearlytemperatures[4],0.,0.);
+								ThermalToEnthalpy(&enth_input->element_values[5],yearlytemperatures[5],0.,0.);
+								enth_input->SetInput(P1Enum,NUM_VERTICES,&vertexlids[0],enth_input->element_values);
+								break;
+							case P1DGEnum:
+							case P1xP2Enum:
+							case P1xP3Enum:
+							case P1xP4Enum:
+								ThermalToEnthalpy(&enth_input->element_values[3],yearlytemperatures[3],0.,0.);
+								ThermalToEnthalpy(&enth_input->element_values[4],yearlytemperatures[4],0.,0.);
+								ThermalToEnthalpy(&enth_input->element_values[5],yearlytemperatures[5],0.,0.);
+								enth_input->SetInput(enth_input->GetInputInterpolationType(),this->lid,this->GetNumberOfNodes(enth_input->GetInputInterpolationType()),enth_input->element_values);
+								break;
+							default:
+								_error_("Interpolation "<<EnumToStringx(temp_input->GetInputInterpolationType())<<" not supported yet");
+						}
 					}
 				}
 			}
-			this->inputs->AddInput(new PentaInput(SmbMassBalanceEnum,&smb[0],P1Enum));
-			this->inputs->AddInput(new PentaInput(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum));
-			this->inputs->AddInput(new PentaInput(SmbAccumulationEnum,&accu[0],P1Enum));
-			this->inputs->AddInput(new PentaInput(SmbMeltEnum,&melt[0],P1Enum));
+			this->AddInput2(SmbMassBalanceEnum,&smb[0],P1Enum);
+			this->AddInput2(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum);
+			this->AddInput2(SmbAccumulationEnum,&accu[0],P1Enum);
+			this->AddInput2(SmbMeltEnum,&melt[0],P1Enum);
 			this->InputExtrude(TemperaturePDDEnum,-1);
 			this->InputExtrude(SmbMassBalanceEnum,-1);
 			this->InputExtrude(SmbAccumulationEnum,-1);
 			this->InputExtrude(SmbMeltEnum,-1);
-			break;
-		case TetraEnum:
-			if(IsOnSurface()){
-				GetInputListOnVertices(&s[0],TemperatureEnum);
-				yearlytemperatures[0] = s[0];
-				yearlytemperatures[1] = s[1];
-				yearlytemperatures[2] = s[2];
-				this->inputs->AddInput(new TetraInput(TemperatureEnum,&yearlytemperatures[0],P1Enum));
-			}
-			this->inputs->AddInput(new TetraInput(SmbMassBalanceEnum,&smb[0],P1Enum));
-			this->inputs->AddInput(new TetraInput(TemperaturePDDEnum,&yearlytemperatures[0],P1Enum));
-			this->InputExtrude(TemperaturePDDEnum,-1);
-			this->InputExtrude(SmbMassBalanceEnum,-1);
 			break;
 		default: _error_("Not implemented yet");
@@ -3323,5 +3200,4 @@
 		case SigmaNNEnum: this->ComputeSigmaNN(); break;
 		case LambdaSEnum: this->ComputeLambdaS(); break;
-		case NewDamageEnum: this->ComputeNewDamage(); break;
 		case StressIntensityFactorEnum: this->StressIntensityFactor(); break;
 		case CalvingratexEnum:
@@ -3356,82 +3232,145 @@
 		case SurfaceCrevasseEnum: this->CalvingCrevasseDepth(); break;
 		case SigmaVMEnum: this->CalvingRateVonmises(); break;
-	}
+		case PartitioningEnum: this->inputs2->SetInput(PartitioningEnum,this->lid,IssmComm::GetRank()); break;
+	}
+
+	/*If this input is not already in Inputs, maybe it needs to be computed?*/
+	switch(this->inputs2->GetInputObjectEnum(output_enum)){
+		case TriaInput2Enum:
+		case PentaInput2Enum:
+		case TransientInput2Enum:{
+			Input2* input2 = this->GetInput2(output_enum);
+			if(!input2) _error_("input "<<EnumToStringx(output_enum)<<" not found in element");
+			*pinterpolation   = input2->GetResultInterpolation();
+			*pnodesperelement = input2->GetResultNumberOfNodes();
+			*parray_size      = input2->GetResultArraySize();
+			}
+			break;
+		case BoolInput2Enum:
+			*pinterpolation   = P0Enum;
+			*pnodesperelement = 1;
+			*parray_size      = 1;
+			break;
+		case IntInput2Enum:
+			*pinterpolation   = P0Enum;
+			*pnodesperelement = 1;
+			*parray_size      = 1;
+			break;
+		case ArrayInput2Enum:{
+			int M;
+			this->inputs2->GetArray(output_enum,this->lid,NULL,&M);
+			*pinterpolation   = P0ArrayEnum;
+			*pnodesperelement = 1;
+			*parray_size      = M;
+			}
+			break;
+		default:
+			_error_("Input type \""<<EnumToStringx(this->inputs2->GetInputObjectEnum(output_enum))<<"\" not supported yet (While trying to return "<<EnumToStringx(output_enum)<<")");
+	}
+
+
+	/*Assign output pointer*/
+
+	return;
+}/*}}}*/
+void       Element::ResultToPatch(IssmDouble* values,int nodesperelement,int output_enum){/*{{{*/
 
 	/*Find input*/
-	Input* input=this->inputs->GetInput(output_enum);
-
-	/*If this input is not already in Inputs, maybe it needs to be computed?*/
+	Input2* input=this->GetInput2(output_enum);
 	if(!input) _error_("input "<<EnumToStringx(output_enum)<<" not found in element");
 
-	/*Assign output pointer*/
-	*pinterpolation   = input->GetResultInterpolation();
-	*pnodesperelement = input->GetResultNumberOfNodes();
-	*parray_size      = input->GetResultArraySize();
-}/*}}}*/
-void       Element::ResultToPatch(IssmDouble* values,int nodesperelement,int output_enum){/*{{{*/
-
-	Input* input=this->inputs->GetInput(output_enum);
-	if(!input) _error_("input "<<EnumToStringx(output_enum)<<" not found in element");
-
-	input->ResultToPatch(values,nodesperelement,this->Sid());
+	/*Cast to ElementInput*/
+	if(input->ObjectEnum()!=TriaInput2Enum && input->ObjectEnum()!=PentaInput2Enum){
+		_error_("Input "<<EnumToStringx(output_enum)<<" is not an ElementInput2");
+	}
+	ElementInput2* element_input = xDynamicCast<ElementInput2*>(input);
+
+	/*Get Number of nodes and make sure that it is the same as the one provided*/
+	int numnodes = this->GetNumberOfNodes(element_input->GetInputInterpolationType());
+	_assert_(numnodes==nodesperelement);
+
+	/*Fill in arrays*/
+	for(int i=0;i<numnodes;i++) values[this->sid*numnodes + i] = element_input->element_values[i];
 
 } /*}}}*/
 void       Element::ResultToMatrix(IssmDouble* values,int ncols,int output_enum){/*{{{*/
 
-	Input* input=this->inputs->GetInput(output_enum);
-	if(!input) _error_("input "<<EnumToStringx(output_enum)<<" not found in element");
-
-	input->ResultToMatrix(values,ncols,this->Sid());
+	IssmDouble* array = NULL;
+	int         m;
+	this->inputs2->GetArray(output_enum,this->lid,&array,&m);
+	for(int i=0;i<m;i++) values[this->Sid()*ncols + i] = array[i];
+	xDelete<IssmDouble>(array);
 
 } /*}}}*/
 void       Element::ResultToVector(Vector<IssmDouble>* vector,int output_enum){/*{{{*/
 
-	Input* input=this->inputs->GetInput(output_enum);
-	if(!input) _error_("input "<<EnumToStringx(output_enum)<<" not found in element");
-
-	switch(input->GetResultInterpolation()){
-		case P0Enum:{
-							IssmDouble  value;
-							bool        bvalue;
-							Input*      input = this->GetInput(output_enum); _assert_(input);
-							switch(input->ObjectEnum()){
-								case DoubleInputEnum:
-									input->GetInputValue(&value);
-									break;
-								case BoolInputEnum:
-									input->GetInputValue(&bvalue);
-									value=reCast<IssmDouble>(bvalue);
-									break;
-								default:
-									Gauss* gauss = this->NewGauss();
-									input->GetInputValue(&value,gauss);
-									delete gauss;
-							}
-							vector->SetValue(this->Sid(),value,INS_VAL);
-							break;
-						}
-		case P1Enum:{
-							const int NUM_VERTICES = this->GetNumberOfVertices();
-
-							IssmDouble *values      = xNew<IssmDouble>(NUM_VERTICES);
-							int        *connectivity= xNew<int>(NUM_VERTICES);
-							int        *sidlist     = xNew<int>(NUM_VERTICES);
-
-							this->GetVerticesSidList(sidlist);
-							this->GetVerticesConnectivityList(connectivity);
-							this->GetInputListOnVertices(values,output_enum);
-							for(int i=0;i<NUM_VERTICES;i++) values[i] = values[i]/reCast<IssmDouble>(connectivity[i]);
-
-							vector->SetValues(NUM_VERTICES,sidlist,values,ADD_VAL);
-
-							xDelete<IssmDouble>(values);
-							xDelete<int>(connectivity);
-							xDelete<int>(sidlist);
-							break;
-						}
+	IssmDouble values[MAXVERTICES];
+	int        connectivity[MAXVERTICES];
+	int        sidlist[MAXVERTICES];
+
+	switch(this->inputs2->GetInputObjectEnum(output_enum)){
+		case TriaInput2Enum:
+		case PentaInput2Enum:
+		case TransientInput2Enum:{
+
+			Input2* input2 = this->GetInput2(output_enum);
+			if(!input2) _error_("input "<<EnumToStringx(output_enum)<<" not found in element");
+
+			switch(input2->GetResultInterpolation()){
+				case P0Enum:{
+					IssmDouble  value;
+					bool        bvalue;
+					Gauss* gauss = this->NewGauss();
+					input2->GetInputValue(&value,gauss);
+					delete gauss;
+					vector->SetValue(this->Sid(),value,INS_VAL);
+					break;
+					}
+				case P1Enum:{
+					const int NUM_VERTICES = this->GetNumberOfVertices();
+
+
+
+					this->GetVerticesSidList(&sidlist[0]);
+					this->GetVerticesConnectivityList(&connectivity[0]);
+					this->GetInputListOnVertices(&values[0],output_enum);
+					for(int i=0;i<NUM_VERTICES;i++) values[i] = values[i]/reCast<IssmDouble>(connectivity[i]);
+					vector->SetValues(NUM_VERTICES,sidlist,values,ADD_VAL);
+					break;
+					}
+				default:
+					_error_("interpolation "<<EnumToStringx(input2->GetResultInterpolation())<<" not supported yet");
+				}
+			}
+			break;
+		case BoolInput2Enum:
+			bool bvalue;
+			this->GetInput2Value(&bvalue,output_enum);
+			vector->SetValue(this->Sid(),reCast<IssmDouble>(bvalue),INS_VAL);
+			break;
+		case IntInput2Enum:
+			int ivalue;
+			this->GetInput2Value(&ivalue,output_enum);
+			vector->SetValue(this->Sid(),reCast<IssmDouble>(ivalue),INS_VAL);
+			break;
 		default:
-					 _error_("interpolation "<<EnumToStringx(input->GetResultInterpolation())<<" not supported yet");
-	}
+			_error_("Input type \""<<EnumToStringx(this->inputs2->GetInputObjectEnum(output_enum))<<"\" not supported yet");
+	}
+
 } /*}}}*/
+void       Element::SetBoolInput(Inputs2* inputs2,int enum_in,bool value){/*{{{*/
+
+	_assert_(inputs2);
+	inputs2->SetInput(enum_in,this->lid,value);
+
+}
+/*}}}*/
+void       Element::SetIntInput(Inputs2* inputs2,int enum_in,int value){/*{{{*/
+
+	_assert_(inputs2);
+	inputs2->SetInput(enum_in,this->lid,value);
+
+}
+/*}}}*/
 void       Element::SetwiseNodeConnectivity(int* pd_nz,int* po_nz,Node* node,bool* flags,int* flagsindices,int set1_enum,int set2_enum){/*{{{*/
 
@@ -3491,5 +3430,5 @@
 	parameters->FindParam(&analysis_type,AnalysisTypeEnum);
 	if(analysis_type==StressbalanceAnalysisEnum){
-		inputs->GetInputValue(&approximation,ApproximationEnum);
+		this->GetInput2Value(&approximation,ApproximationEnum);
 		if(approximation==SSAHOApproximationEnum || approximation==SSAFSApproximationEnum){
 			parameters->FindParam(&numlayers,MeshNumberoflayersEnum);
@@ -3548,15 +3487,4 @@
 	rdl=this->FindParam(SmbRdlEnum);
 
-	/* Retrieve inputs: */
-	Input* dailysnowfall_input=this->GetInput(SmbDailysnowfallEnum); _assert_(dailysnowfall_input);
-	Input* dailyrainfall_input=this->GetInput(SmbDailyrainfallEnum); _assert_(dailyrainfall_input);
-	Input* dailydlradiation_input=this->GetInput(SmbDailydlradiationEnum); _assert_(dailydlradiation_input);
-	Input* dailydsradiation_input=this->GetInput(SmbDailydsradiationEnum); _assert_(dailydsradiation_input);
-	Input* dailywindspeed_input=this->GetInput(SmbDailywindspeedEnum); _assert_(dailywindspeed_input);
-	Input* dailypressure_input=this->GetInput(SmbDailypressureEnum); _assert_(dailypressure_input);
-	Input* dailyairdensity_input=this->GetInput(SmbDailyairdensityEnum); _assert_(dailyairdensity_input);
-	Input* dailyairhumidity_input=this->GetInput(SmbDailyairhumidityEnum); _assert_(dailyairhumidity_input);
-	Input* dailytemperature_input=this->GetInput(SmbDailytemperatureEnum); _assert_(dailytemperature_input);
-
 	/* Recover info at the vertices: */
 	GetInputListOnVertices(&s[0],SurfaceEnum);
@@ -3566,16 +3494,27 @@
 	Gauss* gauss=this->NewGauss();
 	for (int iday = 0; iday < 365; iday++){
-		for(int iv=0;iv<NUM_VERTICES;iv++) {
+		/* Retrieve inputs: */
+		Input2* dailysnowfall_input    = this->GetInput2(SmbDailysnowfallEnum,time_yr+(iday+1)/365.*yts); _assert_(dailysnowfall_input);
+		Input2* dailyrainfall_input    = this->GetInput2(SmbDailyrainfallEnum,time_yr+(iday+1)/365.*yts); _assert_(dailyrainfall_input);
+		Input2* dailydlradiation_input = this->GetInput2(SmbDailydlradiationEnum,time_yr+(iday+1)/365.*yts); _assert_(dailydlradiation_input);
+		Input2* dailydsradiation_input = this->GetInput2(SmbDailydsradiationEnum,time_yr+(iday+1)/365.*yts); _assert_(dailydsradiation_input);
+		Input2* dailywindspeed_input   = this->GetInput2(SmbDailywindspeedEnum,time_yr+(iday+1)/365.*yts); _assert_(dailywindspeed_input);
+		Input2* dailypressure_input    = this->GetInput2(SmbDailypressureEnum,time_yr+(iday+1)/365.*yts); _assert_(dailypressure_input);
+		Input2* dailyairdensity_input  = this->GetInput2(SmbDailyairdensityEnum,time_yr+(iday+1)/365.*yts); _assert_(dailyairdensity_input);
+		Input2* dailyairhumidity_input = this->GetInput2(SmbDailyairhumidityEnum,time_yr+(iday+1)/365.*yts); _assert_(dailyairhumidity_input);
+		Input2* dailytemperature_input = this->GetInput2(SmbDailytemperatureEnum,time_yr+(iday+1)/365.*yts); _assert_(dailytemperature_input);
+
+		for(int iv=0;iv<NUM_VERTICES;iv++){
 			gauss->GaussVertex(iv);
 			/* get forcing */
-			dailyrainfall_input->GetInputValue(&dailyrainfall[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
-			dailysnowfall_input->GetInputValue(&dailysnowfall[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
-			dailydlradiation_input->GetInputValue(&dailydlradiation[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
-			dailydsradiation_input->GetInputValue(&dailydsradiation[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
-			dailywindspeed_input->GetInputValue(&dailywindspeed[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
-			dailypressure_input->GetInputValue(&dailypressure[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
-			dailyairdensity_input->GetInputValue(&dailyairdensity[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
-			dailyairhumidity_input->GetInputValue(&dailyairhumidity[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
-			dailytemperature_input->GetInputValue(&dailytemperature[iv*365+iday],gauss,time_yr+(iday+1)/365.*yts);
+			dailyrainfall_input->GetInputValue(&dailyrainfall[iv*365+iday],gauss);
+			dailysnowfall_input->GetInputValue(&dailysnowfall[iv*365+iday],gauss);
+			dailydlradiation_input->GetInputValue(&dailydlradiation[iv*365+iday],gauss);
+			dailydsradiation_input->GetInputValue(&dailydsradiation[iv*365+iday],gauss);
+			dailywindspeed_input->GetInputValue(&dailywindspeed[iv*365+iday],gauss);
+			dailypressure_input->GetInputValue(&dailypressure[iv*365+iday],gauss);
+			dailyairdensity_input->GetInputValue(&dailyairdensity[iv*365+iday],gauss);
+			dailyairhumidity_input->GetInputValue(&dailyairhumidity[iv*365+iday],gauss);
+			dailytemperature_input->GetInputValue(&dailytemperature[iv*365+iday],gauss);
 
 			/* Surface temperature correction */
@@ -3607,8 +3546,8 @@
 	switch(this->ObjectEnum()){
 		case TriaEnum:
-			this->inputs->AddInput(new TriaInput(TemperatureSEMICEnum,&tsurf_out[0],P1Enum)); // TODO add TemperatureSEMICEnum to EnumDefinitions
-			this->inputs->AddInput(new TriaInput(SmbMassBalanceEnum,&smb_out[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(SmbAccumulationEnum,&saccu_out[0],P1Enum));
-			this->inputs->AddInput(new TriaInput(SmbMeltEnum,&smelt_out[0],P1Enum));
+			this->AddInput2(TemperatureSEMICEnum,&tsurf_out[0],P1Enum); // TODO add TemperatureSEMICEnum to EnumDefinitions
+			this->AddInput2(SmbMassBalanceEnum,&smb_out[0],P1Enum);
+			this->AddInput2(SmbAccumulationEnum,&saccu_out[0],P1Enum);
+			this->AddInput2(SmbMeltEnum,&smelt_out[0],P1Enum);
 			break;
 		case PentaEnum:
@@ -3649,8 +3588,11 @@
 }
 /*}}}*/
-void       Element::SmbGemb(){/*{{{*/
+void       Element::SmbGemb(IssmDouble timeinputs, int count){/*{{{*/
+
+	/*only compute SMB at the surface: */
+	if (!IsOnSurface()) return;
 
 	/*Intermediary variables: {{{*/
-	IssmDouble isinitialized=0.0;
+	bool       isinitialized;
 	IssmDouble zTop=0.0;
 	IssmDouble dzTop=0.0;
@@ -3663,8 +3605,4 @@
 	IssmDouble C=0.0;
 	IssmDouble Tz,Vz=0.0;
-	IssmDouble rho_ice, rho_water,aSnow,aIce;
-	IssmDouble time,dt,starttime,finaltime;
-	IssmDouble timeclim=0.0;
-	IssmDouble t,smb_dt;
 	IssmDouble yts;
 	IssmDouble Ta=0.0;
@@ -3677,4 +3615,5 @@
 	IssmDouble teValue=1.0;
 	IssmDouble aValue=0.0;
+	IssmDouble dt,time,smb_dt;
 	int        aIdx=0;
 	int        denIdx=0;
@@ -3686,11 +3625,10 @@
 	IssmDouble dayEC=0.0;
 	IssmDouble initMass=0.0;
-	IssmDouble sumR=0.0;
-	IssmDouble sumM=0.0;
-	IssmDouble sumEC=0.0;
-	IssmDouble sumP=0.0;
-	IssmDouble sumW=0.0;
-	IssmDouble sumMassAdd=0.0;
-	IssmDouble sumdz_add=0.0;
+   IssmDouble sumR=0.0;
+   IssmDouble sumM=0.0;
+   IssmDouble sumEC=0.0;
+   IssmDouble sumP=0.0;
+   IssmDouble sumW=0.0;
+   IssmDouble sumMassAdd=0.0;
 	IssmDouble fac=0.0;
 	IssmDouble sumMass=0.0;
@@ -3701,7 +3639,4 @@
 	IssmDouble thermo_scaling=1.0;
 	IssmDouble adThresh=1023.0;
-	int offsetend=-1;
-	IssmDouble time0, timeend, delta;
-
 	/*}}}*/
 	/*Output variables:{{{ */
@@ -3727,5 +3662,4 @@
 	IssmDouble  mAdd = 0.0;
 	IssmDouble  dz_add = 0.0;
-
 	IssmDouble* dzini=NULL;
 	IssmDouble* dini = NULL;
@@ -3736,22 +3670,15 @@
 	IssmDouble* aini = NULL;
 	IssmDouble* Tini = NULL;
-
 	int         m=0;
-	int         count=0;
 	/*}}}*/
 
-	/*only compute SMB at the surface: */
-	if (!IsOnSurface()) return;
-
 	/*Retrieve material properties and parameters:{{{ */
-	rho_ice = FindParam(MaterialsRhoIceEnum);
-	rho_water = FindParam(MaterialsRhoFreshwaterEnum);
-	parameters->FindParam(&aSnow,SmbASnowEnum);
-	parameters->FindParam(&aIce,SmbAIceEnum);
+	IssmDouble rho_ice   = FindParam(MaterialsRhoIceEnum);
+	IssmDouble rho_water = FindParam(MaterialsRhoFreshwaterEnum);
+	IssmDouble aSnow     = parameters->FindParam(SmbASnowEnum);
+	IssmDouble aIce      = parameters->FindParam(SmbAIceEnum);
 	parameters->FindParam(&time,TimeEnum);                        /*transient core time at which we run the smb core*/
 	parameters->FindParam(&dt,TimesteppingTimeStepEnum);          /*transient core time step*/
 	parameters->FindParam(&yts,ConstantsYtsEnum);
-	parameters->FindParam(&finaltime,TimesteppingFinalTimeEnum);
-	parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
 	parameters->FindParam(&smb_dt,SmbDtEnum);                     /*time period for the smb solution,  usually smaller than the glaciological dt*/
 	parameters->FindParam(&aIdx,SmbAIdxEnum);
@@ -3763,5 +3690,4 @@
 	parameters->FindParam(&t0dry,SmbT0dryEnum);
 	parameters->FindParam(&K,SmbKEnum);
-	parameters->FindParam(&isclimatology,SmbIsclimatologyEnum);
 	parameters->FindParam(&isgraingrowth,SmbIsgraingrowthEnum);
 	parameters->FindParam(&isalbedo,SmbIsalbedoEnum);
@@ -3775,31 +3701,23 @@
 	parameters->FindParam(&thermo_scaling,SmbThermoDeltaTScalingEnum);
 	parameters->FindParam(&adThresh,SmbAdThreshEnum);
-
 	/*}}}*/
 	/*Retrieve inputs: {{{*/
-	Input* zTop_input=this->GetInput(SmbZTopEnum); _assert_(zTop_input);
-	Input* dzTop_input=this->GetInput(SmbDzTopEnum); _assert_(dzTop_input);
-	Input* dzMin_input=this->GetInput(SmbDzMinEnum); _assert_(dzMin_input);
-	Input* zMax_input=this->GetInput(SmbZMaxEnum); _assert_(zMax_input);
-	Input* zMin_input=this->GetInput(SmbZMinEnum); _assert_(zMin_input);
-	Input* zY_input=this->GetInput(SmbZYEnum); _assert_(zY_input);
-	Input* Tmean_input=this->GetInput(SmbTmeanEnum); _assert_(Tmean_input);
-	Input* Vmean_input=this->GetInput(SmbVmeanEnum); _assert_(Vmean_input);
-	Input* C_input=this->GetInput(SmbCEnum); _assert_(C_input);
-	Input* Tz_input=this->GetInput(SmbTzEnum); _assert_(Tz_input);
-	Input* Vz_input=this->GetInput(SmbVzEnum); _assert_(Vz_input);
-	Input* Ta_input=this->GetInput(SmbTaEnum); _assert_(Ta_input);
-	Input* V_input=this->GetInput(SmbVEnum); _assert_(V_input);
-	Input* Dlwr_input=this->GetInput(SmbDlwrfEnum); _assert_(Dlwr_input);
-	Input* Dswr_input=this->GetInput(SmbDswrfEnum); _assert_(Dswr_input);
-	Input* P_input=this->GetInput(SmbPEnum); _assert_(P_input);
-	Input* eAir_input=this->GetInput(SmbEAirEnum); _assert_(eAir_input);
-	Input* pAir_input=this->GetInput(SmbPAirEnum); _assert_(pAir_input);
-	Input* teValue_input=this->GetInput(SmbTeValueEnum); _assert_(teValue_input);
-	Input* aValue_input=this->GetInput(SmbAValueEnum); _assert_(aValue_input);
-	Input* isinitialized_input=this->GetInput(SmbIsInitializedEnum); _assert_(isinitialized_input);
+	Input2 *zTop_input          = this->GetInput2(SmbZTopEnum);         _assert_(zTop_input);
+	Input2 *dzTop_input         = this->GetInput2(SmbDzTopEnum);        _assert_(dzTop_input);
+	Input2 *dzMin_input         = this->GetInput2(SmbDzMinEnum);        _assert_(dzMin_input);
+	Input2 *zMax_input          = this->GetInput2(SmbZMaxEnum);         _assert_(zMax_input);
+	Input2 *zMin_input          = this->GetInput2(SmbZMinEnum);         _assert_(zMin_input);
+	Input2 *zY_input            = this->GetInput2(SmbZYEnum);           _assert_(zY_input);
+	Input2 *Tmean_input         = this->GetInput2(SmbTmeanEnum);        _assert_(Tmean_input);
+	Input2 *Vmean_input         = this->GetInput2(SmbVmeanEnum);        _assert_(Vmean_input);
+	Input2 *C_input             = this->GetInput2(SmbCEnum);            _assert_(C_input);
+	Input2 *Tz_input            = this->GetInput2(SmbTzEnum);           _assert_(Tz_input);
+	Input2 *Vz_input            = this->GetInput2(SmbVzEnum);           _assert_(Vz_input);
+	Input2 *EC_input            = NULL;
+
 	/*Retrieve input values:*/
 	Gauss* gauss=this->NewGauss(1); gauss->GaussPoint(0);
 
+	this->GetInputValue(&isinitialized,SmbIsInitializedEnum);
 	zTop_input->GetInputValue(&zTop,gauss);
 	dzTop_input->GetInputValue(&dzTop,gauss);
@@ -3813,38 +3731,25 @@
 	Tz_input->GetInputValue(&Tz,gauss);
 	Vz_input->GetInputValue(&Vz,gauss);
-	teValue_input->GetInputValue(&teValue,gauss);
-	aValue_input->GetInputValue(&aValue,gauss);
-	isinitialized_input->GetInputValue(&isinitialized);
 	/*}}}*/
 
 	/*First, check that the initial structures have been setup in GEMB. If not, initialize profile variables: layer thickness dz, * density d, temperature T, etc. {{{*/
-	if(isinitialized==0.0){
+	if(!isinitialized){
 		if(VerboseSmb() && this->Sid()==0)_printf0_("smb core: Initializing grid\n");
 		//if(this->Sid()==1) for(int i=0;i<m;i++)_printf_("z[" << i << "]=" <<
 		//dz[i] << "\n");
 
-		DoubleArrayInput* dz_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbDziniEnum)); _assert_(dz_input);
-		DoubleArrayInput* d_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbDiniEnum));_assert_(d_input);
-		DoubleArrayInput* re_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbReiniEnum));_assert_(re_input);
-		DoubleArrayInput* gdn_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbGdniniEnum));_assert_(gdn_input);
-		DoubleArrayInput* gsp_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbGspiniEnum));_assert_(gsp_input);
-		DoubleInput* EC_input= dynamic_cast<DoubleInput*>(this->GetInput(SmbECiniEnum));_assert_(EC_input);
-		DoubleArrayInput* W_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbWiniEnum));_assert_(W_input);
-		DoubleArrayInput* a_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbAiniEnum));_assert_(a_input);
-		DoubleArrayInput* T_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbTiniEnum));_assert_(T_input);
-
-		dz_input->GetValues(&dzini,&m);
-		d_input->GetValues(&dini,&m);
-		re_input->GetValues(&reini,&m);
-		gdn_input->GetValues(&gdnini,&m);
-		gsp_input->GetValues(&gspini,&m);
-		EC_input->GetInputValue(&EC);
-		W_input->GetValues(&Wini,&m);
-		a_input->GetValues(&aini,&m);
-		T_input->GetValues(&Tini,&m);
-
-		/*Retrive the correct value of m (without the zeroes at the end)*/
-		Input* Size_input=this->GetInput(SmbSizeiniEnum); _assert_(Size_input);
-		Size_input->GetInputValue(&m);
+		this->inputs2->GetArray(SmbDziniEnum,this->lid,&dzini,&m);
+		this->inputs2->GetArray(SmbDiniEnum,this->lid,&dini,&m);
+		this->inputs2->GetArray(SmbReiniEnum,this->lid,&reini,&m);
+		this->inputs2->GetArray(SmbGdniniEnum,this->lid,&gdnini,&m);
+		this->inputs2->GetArray(SmbGspiniEnum,this->lid,&gspini,&m);
+		this->inputs2->GetArray(SmbWiniEnum,this->lid,&Wini,&m);
+		this->inputs2->GetArray(SmbAiniEnum,this->lid,&aini,&m);
+		this->inputs2->GetArray(SmbTiniEnum,this->lid,&Tini,&m);
+		EC_input = this->GetInput2(SmbECiniEnum);  _assert_(EC_input);
+		EC_input->GetInputAverage(&EC);
+
+		/*Retrieve the correct value of m (without the zeroes at the end)*/
+		this->GetInput2Value(&m,SmbSizeiniEnum);
 
 		if(m==2){ //Snow properties are initialized with default values. Vertical grid has to be initialized too
@@ -3854,11 +3759,11 @@
 			GembgridInitialize(&dz, &m, zTop, dzTop, zMax, zY);
 
-			d = xNewZeroInit<IssmDouble>(m); for(int i=0;i<m;i++)d[i]=dini[0]; //ice density [kg m-3]
-			re = xNewZeroInit<IssmDouble>(m); for(int i=0;i<m;i++)re[i]=reini[0];         //set grain size to old snow [mm]
-			gdn = xNewZeroInit<IssmDouble>(m); for(int i=0;i<m;i++)gdn[i]=gdnini[0];         //set grain dentricity to old snow
-			gsp = xNewZeroInit<IssmDouble>(m); for(int i=0;i<m;i++)gsp[i]=gspini[0];         //set grain sphericity to old snow
-			W = xNewZeroInit<IssmDouble>(m); for(int i=0;i<m;i++)W[i]=Wini[0];             //set water content to zero [kg m-2]
-			a = xNewZeroInit<IssmDouble>(m); for(int i=0;i<m;i++)a[i]=aini[0];         //set albedo equal to fresh snow [fraction]
-			T = xNewZeroInit<IssmDouble>(m); for(int i=0;i<m;i++)T[i]=Tmean;         //set initial grid cell temperature to the annual mean temperature [K]
+			d = xNew<IssmDouble>(m); for(int i=0;i<m;i++)d[i]=dini[0]; //ice density [kg m-3]
+			re = xNew<IssmDouble>(m); for(int i=0;i<m;i++)re[i]=reini[0];         //set grain size to old snow [mm]
+			gdn = xNew<IssmDouble>(m); for(int i=0;i<m;i++)gdn[i]=gdnini[0];         //set grain dentricity to old snow
+			gsp = xNew<IssmDouble>(m); for(int i=0;i<m;i++)gsp[i]=gspini[0];         //set grain sphericity to old snow
+			W = xNew<IssmDouble>(m); for(int i=0;i<m;i++)W[i]=Wini[0];             //set water content to zero [kg m-2]
+			a = xNew<IssmDouble>(m); for(int i=0;i<m;i++)a[i]=aini[0];         //set albedo equal to fresh snow [fraction]
+			T = xNew<IssmDouble>(m); for(int i=0;i<m;i++)T[i]=Tmean;         //set initial grid cell temperature to the annual mean temperature [K]
 			/*/!\ Default value of T can not be retrived from SMBgemb.m (like other snow properties)
 			 *    because don't know Tmean yet when set default values.
@@ -3871,46 +3776,37 @@
 			//            if(VerboseSmb() && this->Sid()==0)_printf0_("Snow properties initialized w RESTART values\n");
 
-			dz = xNewZeroInit<IssmDouble>(m);for(int i=0;i<m;i++)dz[i]=dzini[i];
-			d = xNewZeroInit<IssmDouble>(m);for(int i=0;i<m;i++)d[i]=dini[i];
-			re = xNewZeroInit<IssmDouble>(m);for(int i=0;i<m;i++)re[i]=reini[i];
-			gdn = xNewZeroInit<IssmDouble>(m);for(int i=0;i<m;i++)gdn[i]=gdnini[i];
-			gsp = xNewZeroInit<IssmDouble>(m);for(int i=0;i<m;i++)gsp[i]=gspini[i];
-			W = xNewZeroInit<IssmDouble>(m);for(int i=0;i<m;i++)W[i]=Wini[i];
-			a = xNewZeroInit<IssmDouble>(m);for(int i=0;i<m;i++)a[i]=aini[i];
-			T = xNewZeroInit<IssmDouble>(m);for(int i=0;i<m;i++)T[i]=Tini[i];
+			dz = xNew<IssmDouble>(m);for(int i=0;i<m;i++)dz[i]=dzini[i];
+			d = xNew<IssmDouble>(m);for(int i=0;i<m;i++)d[i]=dini[i];
+			re = xNew<IssmDouble>(m);for(int i=0;i<m;i++)re[i]=reini[i];
+			gdn = xNew<IssmDouble>(m);for(int i=0;i<m;i++)gdn[i]=gdnini[i];
+			gsp = xNew<IssmDouble>(m);for(int i=0;i<m;i++)gsp[i]=gspini[i];
+			W = xNew<IssmDouble>(m);for(int i=0;i<m;i++)W[i]=Wini[i];
+			a = xNew<IssmDouble>(m);for(int i=0;i<m;i++)a[i]=aini[i];
+			T = xNew<IssmDouble>(m);for(int i=0;i<m;i++)T[i]=Tini[i];
 
 			//fixed lower temperature bounday condition - T is fixed
+			_assert_(m>0);
 			T_bottom=T[m-1];
 		}
 
 		/*Flag the initialization:*/
-		this->AddInput(new DoubleInput(SmbIsInitializedEnum,1.0));
+		this->SetBoolInput(this->inputs2,SmbIsInitializedEnum,true);
 	}
 	else{
 		/*Recover inputs: */
-		DoubleArrayInput* dz_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbDzEnum)); _assert_(dz_input);
-		DoubleArrayInput* d_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbDEnum));_assert_(d_input);
-		DoubleArrayInput* re_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbReEnum));_assert_(re_input);
-		DoubleArrayInput* gdn_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbGdnEnum));_assert_(gdn_input);
-		DoubleArrayInput* gsp_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbGspEnum));_assert_(gsp_input);
-		DoubleInput* EC_input= dynamic_cast<DoubleInput*>(this->GetInput(SmbECEnum));_assert_(EC_input);
-		DoubleArrayInput* W_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbWEnum));_assert_(W_input);
-		DoubleArrayInput* a_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbAEnum));_assert_(a_input);
-		DoubleArrayInput* T_input= dynamic_cast<DoubleArrayInput*>(this->GetInput(SmbTEnum));_assert_(T_input);
-
-		/*Recover arrays: */
-		dz_input->GetValues(&dz,&m);
-		d_input->GetValues(&d,&m);
-		re_input->GetValues(&re,&m);
-		gdn_input->GetValues(&gdn,&m);
-		gsp_input->GetValues(&gsp,&m);
-		EC_input->GetInputValue(&EC);
-		W_input->GetValues(&W,&m);
-		a_input->GetValues(&a,&m);
-		T_input->GetValues(&T,&m);
+		this->inputs2->GetArray(SmbDzEnum,this->lid,&dz,&m);
+		this->inputs2->GetArray(SmbDEnum,this->lid,&d,&m);
+		this->inputs2->GetArray(SmbReEnum,this->lid,&re,&m);
+		this->inputs2->GetArray(SmbGdnEnum,this->lid,&gdn,&m);
+		this->inputs2->GetArray(SmbGspEnum,this->lid,&gsp,&m);
+		this->inputs2->GetArray(SmbWEnum,this->lid,&W,&m);
+		this->inputs2->GetArray(SmbAEnum,this->lid,&a,&m);
+		this->inputs2->GetArray(SmbTEnum,this->lid,&T,&m);
+		EC_input = this->GetInput2(SmbECDtEnum);  _assert_(EC_input);
+		EC_input->GetInputAverage(&EC);
 
 		//fixed lower temperature bounday condition - T is fixed
+		_assert_(m>0);
 		T_bottom=T[m-1];
-
 	} /*}}}*/
 
@@ -3920,163 +3816,190 @@
 	// initialize cumulative variables
 	sumR = 0; sumM = 0; sumEC = 0; sumP = 0; sumMassAdd = 0;
-	sumdz_add=0;
 
 	//before starting loop, realize that the transient core runs this smb_core at time = time +deltaT.
-	//go back to time - deltaT:
-	time-=dt;
-
-	timeclim=time;
-	if (isclimatology){
-		//If this is a climatology, we need to repeat the forcing after the final time
-		offsetend=dynamic_cast<TransientInput*>(Ta_input)->GetTimeInputOffset(finaltime);
-		time0=dynamic_cast<TransientInput*>(Ta_input)->GetTimeByOffset(-1);
-		timeend=dynamic_cast<TransientInput*>(Ta_input)->GetTimeByOffset(offsetend);
-		if (time>time0 & timeend>time0){
-			delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-			timeclim=time0+delta;
-		}
-	}
-
-	/*Start loop: */
-	count=1;
-	for (t=time;t<time+dt;t=t+smb_dt){
-
-		if(VerboseSmb() && this->Sid()==0 && IssmComm::GetRank()==0)_printf0_("Time: t=" << setprecision(8) << t/365.0/24.0/3600.0 << " yr/" << (time+dt)/365.0/24.0/3600.0 << " yr" << setprecision(3) << " Step: " << count << "\n");
-
-		/*extract daily data:{{{*/
-		Ta_input->GetInputValue(&Ta,gauss,t-time+timeclim);//screen level air temperature [K]
-		V_input->GetInputValue(&V,gauss,t-time+timeclim);  //wind speed [m s-1]
-		Dlwr_input->GetInputValue(&dlw,gauss,t-time+timeclim);   //downward longwave radiation flux [W m-2]
-		Dswr_input->GetInputValue(&dsw,gauss,t-time+timeclim);   //downward shortwave radiation flux [W m-2]
-		P_input->GetInputValue(&P,gauss,t-time+timeclim);        //precipitation [kg m-2]
-		eAir_input->GetInputValue(&eAir,gauss,t-time+timeclim);  //screen level vapor pressure [Pa]
-		pAir_input->GetInputValue(&pAir,gauss,t-time+timeclim);  // screen level air pressure [Pa]
-		teValue_input->GetInputValue(&teValue,gauss);  // Emissivity [0-1]
-		aValue_input->GetInputValue(&aValue,gauss);  // screen level air pressure [Pa]
-		//_printf_("Time: " << t << " Ta: " << Ta << " V: " << V << " dlw: " << dlw << " dsw: " << dsw << " P: " << P << " eAir: " << eAir << " pAir: " << pAir << "\n");
-		/*}}}*/
-
-		/*Snow grain metamorphism:*/
-		if(isgraingrowth)grainGrowth(&re, &gdn, &gsp, T, dz, d, W, smb_dt, m, aIdx,this->Sid());
-
-		/*Snow, firn and ice albedo:*/
-		if(isalbedo)albedo(&a,aIdx,re,d,cldFrac,aIce,aSnow,aValue,adThresh,T,W,P,EC,t0wet,t0dry,K,smb_dt,rho_ice,m,this->Sid());
-
-		/*Distribution of absorbed short wave radation with depth:*/
-		if(isshortwave)shortwave(&swf, swIdx, aIdx, dsw, a[0], d, dz, re,rho_ice,m,this->Sid());
-
-		/*Calculate net shortwave [W m-2]*/
-		netSW = netSW + cellsum(swf,m)*smb_dt/dt;
-
-		/*Thermal profile computation:*/
-		if(isthermal)thermo(&EC, &T, &ulw, dz, d, swf, dlw, Ta, V, eAir, pAir, teValue, W[0], smb_dt, m, Vz, Tz, thermo_scaling,rho_ice,this->Sid());
-
-		/*Change in thickness of top cell due to evaporation/condensation  assuming same density as top cell.
-		 * need to fix this in case all or more of cell evaporates */
-		dz[0] = dz[0] + EC / d[0];
-
-		/*Add snow/rain to top grid cell adjusting cell depth, temperature and density*/
-		if(isaccumulation)accumulation(&T, &dz, &d, &W, &a, &re, &gdn, &gsp, &m, aIdx, dsnowIdx, Tmean, Ta, P, dzMin, aSnow, C, V, Vmean, rho_ice,this->Sid());
-
-		/*Calculate water production, M [kg m-2] resulting from snow/ice temperature exceeding 273.15 deg K
-		 * (> 0 deg C), runoff R [kg m-2] and resulting changes in density and determine wet compaction [m]*/
-		if(ismelt)melt(&M, &R, &mAdd, &dz_add, &T, &d, &dz, &W, &a, &re, &gdn, &gsp, &m, dzMin, zMax, zMin, zTop,rho_ice,this->Sid());
-
-		/*Allow non-melt densification and determine compaction [m]*/
-		if(isdensification)densification(&d,&dz, T, re, denIdx, C, smb_dt, Tmean,rho_ice,m,this->Sid());
-
-		/*Calculate upward longwave radiation flux [W m-2] not used in energy balance. Calculated for every
-		 * sub-time step in thermo equations*/
-		//ulw = 5.67E-8 * pow(T[0],4.0) * teValue; // + deltatest here
-
-		/*Calculate net longwave [W m-2]*/
-		meanULW = meanULW + ulw*smb_dt/dt;
-		netLW = netLW + (dlw - ulw)*smb_dt/dt;
-
-		/*Calculate turbulent heat fluxes [W m-2]*/
-		if(isturbulentflux)turbulentFlux(&shf, &lhf, &dayEC, Ta, T[0], V, eAir, pAir, d[0], W[0], Vz, Tz,rho_ice,this->Sid());
-
-		/*Verbose some results in debug mode: {{{*/
-		if(VerboseSmb() && 0){
-			_printf_("smb log: count[" << count << "] m[" << m << "] "
-						<< setprecision(16)   << "T[" << cellsum(T,m)  << "] "
-						<< "d[" << cellsum(d,m)  << "] "
-						<< "dz[" << cellsum(dz,m)  << "] "
-						<< "a[" << cellsum(a,m)  << "] "
-						<< "W[" << cellsum(W,m)  << "] "
-						<< "re[" << cellsum(re,m)  << "] "
-						<< "gdn[" << cellsum(gdn,m)  << "] "
-						<< "gsp[" << cellsum(gsp,m)  << "] "
-						<< "swf[" << netSW << "] "
-						<< "lwf[" << netLW << "] "
-						<< "a[" << a << "] "
-						<< "te[" << teValue << "] "
-						<< "\n");
-		} /*}}}*/
-
-		meanLHF = meanLHF + lhf*smb_dt/dt;
-		meanSHF = meanSHF + shf*smb_dt/dt;
-
-		/*Sum component mass changes [kg m-2]*/
-		sumMassAdd = mAdd + sumMassAdd;
-		sumM = M + sumM;
-		sumR = R + sumR;
-		sumW = cellsum(W,m);
-		sumP = P +  sumP;
-		sumEC = sumEC + EC;  // evap (-)/cond(+)
-
-		/*Calculate total system mass:*/
-		sumMass=0;
-		fac=0;
-		for(int i=0;i<m;i++){
-			sumMass += dz[i]*d[i];
-			fac += dz[i]*(rho_ice - fmin(d[i],rho_ice));
-		}
-
-		#if defined(_HAVE_AD_)
-		/*we want to avoid the round operation at all cost. Not differentiable.*/
-		_error_("not implemented yet");
-		#else
-		dMass = sumMass + sumR + sumW - sumP - sumEC - initMass - sumMassAdd;
-		dMass = round(dMass * 100.0)/100.0;
-
-		/*Check mass conservation:*/
-		if (dMass != 0.0) _printf_("total system mass not conserved in MB function \n");
-		#endif
-
-		/*Check bottom grid cell T is unchanged:*/
-		if(VerboseSmb() && this->Sid()==0 && IssmComm::GetRank()==0){
-			if (T[m-1]!=T_bottom) _printf_("T(end)~=T_bottom" << "\n");
-		}
-
-		/*Free ressources: */
-		xDelete<IssmDouble>(swf);
-
-		/*increase counter:*/
-		count++;
-	} //for (t=time;t<time+dt;t=t+smb_dt)
+   //go back to time - deltaT:
+   time-=dt;
+
+	if(VerboseSmb() && this->Sid()==0 && IssmComm::GetRank()==0)_printf0_("Time: t=" << setprecision(8) << timeinputs/365.0/24.0/3600.0 << " yr/" << (time+dt)/365.0/24.0/3600.0 << " yr" << setprecision(3) << " Step: " << count << "\n");
+
+	/*Get daily accumulated inputs {{{*/
+	if (count>1){
+		Input2 *sumEC_input         = this->GetInput2(SmbECEnum);  _assert_(sumEC_input);
+		Input2 *sumM_input          = this->GetInput2(SmbMeltEnum);  _assert_(sumM_input);
+		Input2 *sumR_input          = this->GetInput2(SmbRunoffEnum);  _assert_(sumR_input);
+		Input2 *sumP_input          = this->GetInput2(SmbPrecipitationEnum);  _assert_(sumP_input);
+		Input2 *ULW_input           = this->GetInput2(SmbMeanULWEnum);  _assert_(ULW_input);
+		Input2 *LW_input            = this->GetInput2(SmbNetLWEnum);  _assert_(LW_input);
+		Input2 *SW_input            = this->GetInput2(SmbNetSWEnum);  _assert_(SW_input);
+		Input2 *LHF_input           = this->GetInput2(SmbMeanLHFEnum);  _assert_(LHF_input);
+		Input2 *SHF_input           = this->GetInput2(SmbMeanSHFEnum);  _assert_(SHF_input);
+		Input2 *DzAdd_input         = this->GetInput2(SmbDzAddEnum);  _assert_(DzAdd_input);
+		Input2 *MassAdd_input       = this->GetInput2(SmbMAddEnum);  _assert_(MassAdd_input);
+		Input2 *InitMass_input      = this->GetInput2(SmbMInitnum);  _assert_(InitMass_input);
+
+		ULW_input->GetInputAverage(&meanULW);
+		LW_input->GetInputAverage(&netLW);
+		SW_input->GetInputAverage(&netSW);
+		LHF_input->GetInputAverage(&meanLHF);
+		SHF_input->GetInputAverage(&meanSHF);
+		DzAdd_input->GetInputAverage(&dz_add);
+		MassAdd_input->GetInputAverage(&sumMassAdd);
+		sumMassAdd=sumMassAdd*dt;
+		InitMass_input->GetInputAverage(&initMass);
+		sumEC_input->GetInputAverage(&sumEC);
+		sumEC=sumEC*dt*rho_ice;
+		sumM_input->GetInputAverage(&sumM);
+		sumM=sumM*dt*rho_ice;
+		sumR_input->GetInputAverage(&sumR);
+		sumR=sumR*dt*rho_ice;
+		sumP_input->GetInputAverage(&sumP);
+		sumP=sumP*dt*rho_ice;
+	}
+	/*}}}*/
+
+	// Get time forcing inputs
+	Input2 *Ta_input  = this->GetInput2(SmbTaEnum,timeinputs);    _assert_(Ta_input);
+	Input2 *V_input   = this->GetInput2(SmbVEnum,timeinputs);     _assert_(V_input);
+	Input2 *Dlwr_input= this->GetInput2(SmbDlwrfEnum,timeinputs); _assert_(Dlwr_input);
+	Input2 *Dswr_input= this->GetInput2(SmbDswrfEnum,timeinputs); _assert_(Dswr_input);
+	Input2 *P_input   = this->GetInput2(SmbPEnum,timeinputs);     _assert_(P_input);
+	Input2 *eAir_input= this->GetInput2(SmbEAirEnum,timeinputs);  _assert_(eAir_input);
+	Input2 *pAir_input= this->GetInput2(SmbPAirEnum,timeinputs);  _assert_(pAir_input);
+	Input2 *teValue_input= this->GetInput2(SmbTeValueEnum,timeinputs); _assert_(teValue_input);
+	Input2 *aValue_input= this->GetInput2(SmbAValueEnum,timeinputs); _assert_(aValue_input);
+
+	/*extract daily data:{{{*/
+	Ta_input->GetInputValue(&Ta,gauss);//screen level air temperature [K]
+	V_input->GetInputValue(&V,gauss);  //wind speed [m s-1]
+	Dlwr_input->GetInputValue(&dlw,gauss);   //downward longwave radiation flux [W m-2]
+	Dswr_input->GetInputValue(&dsw,gauss);   //downward shortwave radiation flux [W m-2]
+	P_input->GetInputValue(&P,gauss);        //precipitation [kg m-2]
+	eAir_input->GetInputValue(&eAir,gauss);  //screen level vapor pressure [Pa]
+	pAir_input->GetInputValue(&pAir,gauss);  // screen level air pressure [Pa]
+	teValue_input->GetInputValue(&teValue,gauss);  // Emissivity [0-1]
+	aValue_input->GetInputValue(&aValue,gauss);  // Albedo [0 1]
+	//_printf_("Time: " << t << " Ta: " << Ta << " V: " << V << " dlw: " << dlw << " dsw: " << dsw << " P: " << P << " eAir: " << eAir << " pAir: " << pAir << "\n");
+	/*}}}*/
+
+	/*Snow grain metamorphism:*/
+	if(isgraingrowth)grainGrowth(&re, &gdn, &gsp, T, dz, d, W, smb_dt, m, aIdx,this->Sid());
+
+	/*Snow, firn and ice albedo:*/
+	if(isalbedo)albedo(&a,aIdx,re,d,cldFrac,aIce,aSnow,aValue,adThresh,T,W,P,EC,t0wet,t0dry,K,smb_dt,rho_ice,m,this->Sid());
+
+	/*Distribution of absorbed short wave radation with depth:*/
+	if(isshortwave)shortwave(&swf, swIdx, aIdx, dsw, a[0], d, dz, re,rho_ice,m,this->Sid());
+
+	/*Calculate net shortwave [W m-2]*/
+	netSW = netSW + cellsum(swf,m)*smb_dt/dt;
+
+	/*Thermal profile computation:*/
+	if(isthermal)thermo(&EC, &T, &ulw, dz, d, swf, dlw, Ta, V, eAir, pAir, teValue, W[0], smb_dt, m, Vz, Tz, thermo_scaling,rho_ice,this->Sid());
+
+	/*Change in thickness of top cell due to evaporation/condensation  assuming same density as top cell.
+	 * need to fix this in case all or more of cell evaporates */
+	dz[0] = dz[0] + EC / d[0];
+
+	/*Add snow/rain to top grid cell adjusting cell depth, temperature and density*/
+	if(isaccumulation)accumulation(&T, &dz, &d, &W, &a, &re, &gdn, &gsp, &m, aIdx, dsnowIdx, Tmean, Ta, P, dzMin, aSnow, C, V, Vmean, rho_ice,this->Sid());
+
+	/*Calculate water production, M [kg m-2] resulting from snow/ice temperature exceeding 273.15 deg K
+	 * (> 0 deg C), runoff R [kg m-2] and resulting changes in density and determine wet compaction [m]*/
+	if(ismelt)melt(&M, &R, &mAdd, &dz_add, &T, &d, &dz, &W, &a, &re, &gdn, &gsp, &m, dzMin, zMax, zMin, zTop,rho_ice,this->Sid());
+
+	/*Allow non-melt densification and determine compaction [m]*/
+	if(isdensification)densification(&d,&dz, T, re, denIdx, C, smb_dt, Tmean,rho_ice,m,this->Sid());
+
+	/*Calculate upward longwave radiation flux [W m-2] not used in energy balance. Calculated for every
+	 * sub-time step in thermo equations*/
+	//ulw = 5.67E-8 * pow(T[0],4.0) * teValue; // + deltatest here
+
+	/*Calculate net longwave [W m-2]*/
+	meanULW = meanULW + ulw*smb_dt/dt;
+	netLW = netLW + (dlw - ulw)*smb_dt/dt;
+
+	/*Calculate turbulent heat fluxes [W m-2]*/
+	if(isturbulentflux)turbulentFlux(&shf, &lhf, &dayEC, Ta, T[0], V, eAir, pAir, d[0], W[0], Vz, Tz,rho_ice,this->Sid());
+
+	/*Verbose some results in debug mode: {{{*/
+	if(VerboseSmb() && 0){
+		_printf_("smb log: count[" << count << "] m[" << m << "] "
+					<< setprecision(16)   << "T[" << cellsum(T,m)  << "] "
+					<< "d[" << cellsum(d,m)  << "] "
+					<< "dz[" << cellsum(dz,m)  << "] "
+					<< "a[" << cellsum(a,m)  << "] "
+					<< "W[" << cellsum(W,m)  << "] "
+					<< "re[" << cellsum(re,m)  << "] "
+					<< "gdn[" << cellsum(gdn,m)  << "] "
+					<< "gsp[" << cellsum(gsp,m)  << "] "
+					<< "swf[" << netSW << "] "
+					<< "lwf[" << netLW << "] "
+					<< "a[" << a << "] "
+					<< "te[" << teValue << "] "
+					<< "\n");
+	} /*}}}*/
+
+	meanLHF = meanLHF + lhf*smb_dt/dt;
+	meanSHF = meanSHF + shf*smb_dt/dt;
+
+	/*Sum component mass changes [kg m-2]*/
+	sumMassAdd = mAdd + sumMassAdd;
+	sumM = M + sumM;
+	sumR = R + sumR;
+	sumW = cellsum(W,m);
+	sumP = P +  sumP;
+	sumEC = sumEC + EC;  // evap (-)/cond(+)
+
+	/*Calculate total system mass:*/
+	sumMass=0;
+	fac=0;
+	for(int i=0;i<m;i++){
+		sumMass += dz[i]*d[i];
+		fac += dz[i]*(rho_ice - fmin(d[i],rho_ice));
+	}
+
+	#if defined(_HAVE_AD_)
+	/*we want to avoid the round operation at all cost. Not differentiable.*/
+	_error_("not implemented yet");
+	#else
+	dMass = sumMass + sumR + sumW - sumP - sumEC - initMass - sumMassAdd;
+	dMass = round(dMass * 100.0)/100.0;
+
+	/*Check mass conservation:*/
+	if (dMass != 0.0){
+		_printf_("total system mass not conserved in MB function \n");
+	}
+	#endif
+
+	/*Check bottom grid cell T is unchanged:*/
+	if(VerboseSmb() && this->Sid()==0 && IssmComm::GetRank()==0){
+		if (T[m-1]!=T_bottom) _printf_("T(end)~=T_bottom" << "\n");
+	}
 
 	/*Save generated inputs: */
-	this->AddInput(new DoubleArrayInput(SmbDzEnum,dz,m));
-	this->AddInput(new DoubleArrayInput(SmbDEnum,d,m));
-	this->AddInput(new DoubleArrayInput(SmbReEnum,re,m));
-	this->AddInput(new DoubleArrayInput(SmbGdnEnum,gdn,m));
-	this->AddInput(new DoubleArrayInput(SmbGspEnum,gsp,m));
-	this->AddInput(new DoubleArrayInput(SmbTEnum,T,m));
-	this->AddInput(new DoubleInput(SmbECEnum,sumEC/dt/rho_ice));
-	this->AddInput(new DoubleArrayInput(SmbWEnum,W,m));
-	this->AddInput(new DoubleArrayInput(SmbAEnum,a,m));
-	this->AddInput(new DoubleInput(SmbMassBalanceEnum,(sumP + sumEC -sumR)/dt/rho_ice));
-	this->AddInput(new DoubleInput(SmbMeltEnum,sumM/dt/rho_ice));
-	this->AddInput(new DoubleInput(SmbRunoffEnum,sumR/dt/rho_ice));
-	this->AddInput(new DoubleInput(SmbPrecipitationEnum,sumP/dt/rho_ice));
-	this->AddInput(new DoubleInput(SmbMeanULWEnum,meanULW));
-	this->AddInput(new DoubleInput(SmbNetLWEnum,netLW));
-	this->AddInput(new DoubleInput(SmbNetSWEnum,netSW));
-	this->AddInput(new DoubleInput(SmbMeanLHFEnum,meanLHF));
-	this->AddInput(new DoubleInput(SmbMeanSHFEnum,meanSHF));
-	this->AddInput(new DoubleInput(SmbDzAddEnum,sumdz_add));
-	this->AddInput(new DoubleInput(SmbMAddEnum,sumMassAdd/dt));
-	this->AddInput(new DoubleInput(SmbFACEnum,fac/1000)); // output in meters
+	this->inputs2->SetArrayInput(SmbDzEnum,this->lid,dz,m);
+	this->inputs2->SetArrayInput(SmbDEnum,this->lid,d,m);
+	this->inputs2->SetArrayInput(SmbReEnum,this->lid,re,m);
+	this->inputs2->SetArrayInput(SmbGdnEnum,this->lid,gdn,m);
+	this->inputs2->SetArrayInput(SmbGspEnum,this->lid,gsp,m);
+	this->inputs2->SetArrayInput(SmbTEnum,this->lid,T,m);
+	this->inputs2->SetArrayInput(SmbWEnum,this->lid,W,m);
+	this->inputs2->SetArrayInput(SmbAEnum,this->lid,a,m);
+	this->SetElementInput(SmbECEnum,sumEC/dt/rho_ice);
+	this->SetElementInput(SmbMassBalanceEnum,(sumP + sumEC -sumR)/dt/rho_ice);
+	this->SetElementInput(SmbMeltEnum,sumM/dt/rho_ice);
+	this->SetElementInput(SmbRunoffEnum,sumR/dt/rho_ice);
+	this->SetElementInput(SmbPrecipitationEnum,sumP/dt/rho_ice);
+	this->SetElementInput(SmbMeanULWEnum,meanULW);
+	this->SetElementInput(SmbNetLWEnum,netLW);
+	this->SetElementInput(SmbNetSWEnum,netSW);
+	this->SetElementInput(SmbMeanLHFEnum,meanLHF);
+	this->SetElementInput(SmbMeanSHFEnum,meanSHF);
+	this->SetElementInput(SmbDzAddEnum,dz_add);
+	this->SetElementInput(SmbMInitnum,initMass);
+	this->SetElementInput(SmbMAddEnum,sumMassAdd/dt);
+	this->SetElementInput(SmbWAddEnum,sumW/dt);
+	this->SetElementInput(SmbFACEnum,fac/1000.); // output in meters
+	this->SetElementInput(SmbECDtEnum,EC);
 
 	/*Free allocations:{{{*/
@@ -4097,4 +4020,5 @@
 	if(aini) xDelete<IssmDouble>(aini);
 	if(Tini) xDelete<IssmDouble>(Tini);
+	if(swf) xDelete<IssmDouble>(swf);
 
 	delete gauss;
@@ -4102,5 +4026,5 @@
 }
 /*}}}*/
-void       Element::StrainRateESA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void       Element::StrainRateESA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -4122,5 +4046,5 @@
 
 }/*}}}*/
-void       Element::StrainRateFS(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){/*{{{*/
+void       Element::StrainRateFS(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input){/*{{{*/
 	/*Compute the 3d Strain Rate (6 components):
 	 *
@@ -4142,4 +4066,5 @@
 	vy_input->GetInputDerivativeValue(&dvy[0],xyz_list,gauss);
 	vz_input->GetInputDerivativeValue(&dvz[0],xyz_list,gauss);
+
 	epsilon[0] = dvx[0];
 	epsilon[1] = dvy[1];
@@ -4150,5 +4075,5 @@
 
 }/*}}}*/
-void       Element::StrainRateHO(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void       Element::StrainRateHO(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 	/*Compute the 3d Blatter/HOStrain Rate (5 components):
 	 *
@@ -4180,5 +4105,5 @@
 
 }/*}}}*/
-void       Element::StrainRateHO2dvertical(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void       Element::StrainRateHO2dvertical(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 	/*Compute the 2d Blatter/HOStrain Rate (2 components):
 	 *
@@ -4204,5 +4129,5 @@
 
 }/*}}}*/
-void       Element::StrainRateSSA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void       Element::StrainRateSSA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -4223,5 +4148,5 @@
 
 }/*}}}*/
-void       Element::StrainRateSSA1d(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input){/*{{{*/
+void       Element::StrainRateSSA1d(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -4259,14 +4184,14 @@
 	/*Retrieve all inputs and parameters*/
 	this->GetVerticesCoordinatesBase(&xyz_list);
-	Input* sigma_xx_input  = this->GetInput(StressTensorxxEnum); _assert_(sigma_xx_input);
-	Input* sigma_yy_input  = this->GetInput(StressTensoryyEnum); _assert_(sigma_yy_input);
-	Input* sigma_xy_input  = this->GetInput(StressTensorxyEnum); _assert_(sigma_xy_input);
-	Input* sigma_xz_input  = NULL;
-	Input* sigma_yz_input  = NULL;
-	Input* sigma_zz_input  = NULL;
+	Input2* sigma_xx_input  = this->GetInput2(StressTensorxxEnum); _assert_(sigma_xx_input);
+	Input2* sigma_yy_input  = this->GetInput2(StressTensoryyEnum); _assert_(sigma_yy_input);
+	Input2* sigma_xy_input  = this->GetInput2(StressTensorxyEnum); _assert_(sigma_xy_input);
+	Input2* sigma_xz_input  = NULL;
+	Input2* sigma_yz_input  = NULL;
+	Input2* sigma_zz_input  = NULL;
 	if(dim==3){
-		sigma_xz_input  = this->GetInput(StressTensorxzEnum); _assert_(sigma_xz_input);
-		sigma_yz_input  = this->GetInput(StressTensoryzEnum); _assert_(sigma_yz_input);
-		sigma_zz_input  = this->GetInput(StressTensorzzEnum); _assert_(sigma_zz_input);
+		sigma_xz_input  = this->GetInput2(StressTensorxzEnum); _assert_(sigma_xz_input);
+		sigma_yz_input  = this->GetInput2(StressTensoryzEnum); _assert_(sigma_yz_input);
+		sigma_zz_input  = this->GetInput2(StressTensorzzEnum); _assert_(sigma_zz_input);
 	}
 
@@ -4318,5 +4243,5 @@
 
 	/*Create input*/
-	this->AddInput(StressMaxPrincipalEnum,maxprincipal,P1Enum);
+	this->AddInput2(StressMaxPrincipalEnum,maxprincipal,P1Enum);
 
 	/*Clean up and return*/
@@ -4602,7 +4527,7 @@
 	/*Retrieve all inputs and parameters*/
 	this->GetVerticesCoordinates(&xyz_list);
-	Input* vx_input        = this->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input        = this->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input        = this->GetInput(VzEnum); _assert_(vz_input);
+	Input2* vx_input = this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = this->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input = this->GetInput2(VzEnum); _assert_(vz_input);
 
 	/*loop over vertices: */
@@ -4617,5 +4542,5 @@
 
 	/*Create PentaVertex input, which will hold the basal friction:*/
-	this->AddInput(ViscousHeatingEnum,viscousheating,P1Enum);
+	this->AddInput2(ViscousHeatingEnum,viscousheating,P1Enum);
 
 	/*Clean up and return*/
Index: /issm/trunk/src/c/classes/Elements/Element.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Element.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Element.h	(revision 24686)
@@ -23,6 +23,10 @@
 class Materials;
 class Material;
+class Inputs2;
 class Inputs;
+class Input2;
 class Input;
+class ElementInput2;
+class DatasetInput2;
 class IoModel;
 class Gauss;
@@ -39,9 +43,12 @@
 		int          id;
 		int          sid;
-		Inputs      *inputs;
+		int          lid;
+		Inputs2     *inputs2;
 		Node       **nodes;
 		Vertex     **vertices;
 		Material    *material;
 		Parameters  *parameters;
+		bool         isonsurface;
+		bool         isonbase;
 
 		int* element_type_list;
@@ -54,7 +61,7 @@
 
 		/*Functions*/
-		void               AddInput(Input* input_in);
 		/*bool               AllActive(void);*/
 		/*bool               AnyActive(void);*/
+		bool               AnyFSet(void);
 		void               ComputeLambdaS(void);
 		void               ComputeNewDamage();
@@ -62,5 +69,4 @@
 		void               CoordinateSystemTransform(IssmDouble** ptransform,Node** nodes,int numnodes,int* cs_array);
 		void               DeepEcho();
-		void               DeleteInput(int input_enum);
 		void               DeleteMaterials(void);
 		void               Delta18oParameterization(void);
@@ -68,8 +74,8 @@
 		void               SmbGradCompParameterization(void);
 		IssmDouble         Divergence(void);
-		void               dViscositydBFS(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input);
-		void               dViscositydBHO(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
-		void               dViscositydBSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
-		void               dViscositydDSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
+		void               dViscositydBFS(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input);
+		void               dViscositydBHO(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void               dViscositydBSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void               dViscositydDSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
 		void               Echo();
 		void               FindParam(bool* pvalue,int paramenum);
@@ -85,5 +91,4 @@
 		void	             GetDofListLocalPressure(int** pdoflist,int setenum);
 		void	             GetDofListLocalVelocity(int** pdoflist,int setenum);
-		Input*             GetInput(int inputenum);
 		void               GetInputListOnNodes(IssmDouble* pvalue,int enumtype);
 		void               GetInputListOnNodes(IssmDouble* pvalue,int enumtype,IssmDouble defaultvalue);
@@ -95,7 +100,8 @@
 		void               GetInputValue(bool* pvalue,int enum_type);
 		void               GetInputValue(int* pvalue,int enum_type);
+		void               GetInput2Value(bool* pvalue,int enum_type);
+		void               GetInput2Value(int* pvalue,int enum_type);
 		void               GetInputValue(IssmDouble* pvalue,int enum_type);
 		void               GetInputValue(IssmDouble* pvalue,Gauss* gauss,int enum_type);
-		void               GetInputsInterpolations(Vector<IssmDouble>* interps);
 		Node*              GetNode(int nodeindex);
 		int                GetNodeIndex(Node* node);
@@ -123,9 +129,7 @@
 		IssmDouble         IceVolumeAboveFloatation(IssmDouble* mask, bool scaled);
 		int                Id();
-		void               InputChangeName(int enum_type,int enum_type_old);
-		void               InputCreate(IssmDouble* vector,IoModel* iomodel,int M,int N,int vector_type,int vector_enum,int code);
-		void               ControlInputCreate(IssmDouble* doublearray,IssmDouble* independents_min,IssmDouble* independents_max,IoModel* iomodel,int M,int N,int input_enum,int id);
-		void					 DatasetInputAdd(int enum_type,IssmDouble* vector,IoModel* iomodel,int M,int N,int vector_type,int vector_enum,int code,int input_enum);
-		void               InputDuplicate(int original_enum,int new_enum);
+		void               InputCreate(IssmDouble* vector,Inputs2* inputs2,IoModel* iomodel,int M,int N,int vector_type,int vector_enum,int code);
+		void               ControlInputCreate(IssmDouble* doublearray,IssmDouble* independents_min,IssmDouble* independents_max,Inputs2*inputs2,IoModel* iomodel,int M,int N,IssmDouble scale,int input_enum,int id);
+		void					 DatasetInputAdd(int enum_type,IssmDouble* vector,Inputs2* inputs2,IoModel* iomodel,int M,int N,int vector_type,int vector_enum,int code,int input_enum);
 		void               InputUpdateFromConstant(IssmDouble constant, int name);
 		void               InputUpdateFromConstant(int constant, int name);
@@ -134,6 +138,10 @@
 		bool               IsFloating();
 		bool               IsGrounded();
+		bool               IsOnBase();
+		bool               IsOnSurface();
 		bool               IsIceInElement();
+		bool               IsIceOnlyInElement();
 		bool               IsLandInElement();
+		bool               IsOceanInElement();
 		void               Ismip6FloatingiceMeltingRate();
 		bool               IsWaterInElement();
@@ -149,7 +157,7 @@
 		ElementMatrix*     NewElementMatrixCoupling(int number_nodes,int approximation_enum=NoneApproximationEnum);
 		ElementVector*     NewElementVector(int approximation_enum=NoneApproximationEnum);
-		void               PicoUpdateBoxid(int* pmax_boxid_basin); 
+		void               PicoUpdateBoxid(int* pmax_boxid_basin);
 		void               PicoUpdateBox(int loopboxid);
-		void               PicoComputeBasalMelt(); 
+		void               PicoComputeBasalMelt();
 		void               PositiveDegreeDay(IssmDouble* pdds,IssmDouble* pds,IssmDouble signorm,bool ismungsm,bool issetpddfac);
 		void               PositiveDegreeDaySicopolis(bool isfirnwarming);
@@ -159,13 +167,16 @@
 		void               ResultToVector(Vector<IssmDouble>* vector,int output_enum);
 		void               SetwiseNodeConnectivity(int* d_nz,int* o_nz,Node* node,bool* flags,int* flagsindices,int set1_enum,int set2_enum);
+		void               SetBoolInput(Inputs2* inputs2,int enum_in,bool value);
+
+		void               SetIntInput(Inputs2* inputs2,int enum_in,int value);
 		void               SmbSemic();
 		int                Sid();
-		void               SmbGemb();
-		void               StrainRateESA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
-		void               StrainRateFS(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input);
-		void               StrainRateHO(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
-		void               StrainRateHO2dvertical(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
-		void               StrainRateSSA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
-		void               StrainRateSSA1d(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input);
+		void               SmbGemb(IssmDouble timeinputs, int count);
+		void               StrainRateESA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void               StrainRateFS(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input);
+		void               StrainRateHO(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void               StrainRateHO2dvertical(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void               StrainRateSSA(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void               StrainRateSSA1d(IssmDouble* epsilon,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input);
 		void               StressMaxPrincipalCreateInput(void);
 		IssmDouble         TotalFloatingBmb(IssmDouble* mask, bool scaled);
@@ -202,8 +213,8 @@
 
 		/*Virtual functions*/
-		virtual void       AddBasalInput(int input_enum, IssmDouble* values, int interpolation_enum)=0;
-		virtual void       AddInput(int input_enum, IssmDouble* values, int interpolation_enum)=0;
-		virtual void       AddControlInput(int input_enum, IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max, int interpolation_enum,int id){_error_("not supported yet");};
-		virtual void       DatasetInputCreate(IssmDouble* array,int M,int N,int* individual_enums,int num_inputs,IoModel* iomodel,int input_enum){_error_("not supported");};
+		virtual void       AddBasalInput2(int input_enum, IssmDouble* values, int interpolation_enum){_error_("not implemented");};
+		virtual void       AddInput2(int input_enum, IssmDouble* values, int interpolation_enum){_error_("not implemented");};
+		virtual void       AddControlInput(int input_enum,Inputs2* inputs2,IoModel* iomodel,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max, int interpolation_enum,int id){_error_("not supported yet");};
+		virtual void       DatasetInputCreate(IssmDouble* array,int M,int N,int* individual_enums,int num_inputs,Inputs2* inputs2,IoModel* iomodel,int input_enum){_error_("not supported");};
 		virtual void       AverageOntoPartition(Vector<IssmDouble>* partition_contributions,Vector<IssmDouble>* partition_areas,IssmDouble* vertex_response,IssmDouble* qmu_part)=0;
 		virtual void		 BasalNodeIndices(int* pnumindices,int** pindices,int finiteelement){_error_("not implemented yet");};
@@ -219,9 +230,10 @@
 		virtual void       ComputeStressTensor(void)=0;
 		virtual void       ComputeEsaStrainAndVorticity(void)=0;
-		virtual void       Configure(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters)=0;
+		virtual void       Configure(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters,Inputs2* inputs2in)=0;
 		virtual void       ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index,int offset,int N,int M)=0;
 		virtual void       ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index)=0;
 		virtual void       ControlToVectors(Vector<IssmPDouble>* vector_control, Vector<IssmPDouble>* vector_gradient,int control_enum)=0;
 		virtual void       CreateDistanceInputFromSegmentlist(IssmDouble* distances,int distanceenum){_error_("not implemented yet");};
+		virtual void       CreateInputTimeAverage(int transientinput_enum,int averagedinput_enum,IssmDouble init_time,IssmDouble end_time){_error_("not implemented yet "<<this->ObjectEnum());};
 		virtual void       ElementResponse(IssmDouble* presponse,int response_enum)=0;
 		virtual void       ElementSizes(IssmDouble* phx,IssmDouble* phy,IssmDouble* phz)=0;
@@ -236,6 +248,12 @@
 		virtual IssmDouble GetIcefrontArea(){_error_("not implemented");};
 		virtual void       GetIcefrontCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum)=0;
+		virtual DatasetInput2* GetDatasetInput2(int inputenum){_error_("not implemented");};
+		virtual Input2*    GetInput2(int inputenum)=0;
+		virtual Input2*    GetInput2(int inputenum,IssmDouble time)=0;
+		virtual Input2*    GetInput2(int inputenum,IssmDouble start_time,IssmDouble end_time)=0;
 		virtual void       GetInputValue(IssmDouble* pvalue,Vertex* vertex,int enumtype){_error_("not implemented yet");};
 		virtual void       GetInputValue(IssmDouble* pvalue,Node* node,int enumtype){_error_("not implemented yet");};
+		virtual void       GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value)=0;
+		virtual void       GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value)=0;
 		virtual void       GetLevelCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum,IssmDouble level)=0;
 		virtual void       GetLevelsetPositivePart(int* point1,IssmDouble* fraction1,IssmDouble* fraction2, bool* mainlynegative,IssmDouble* levelsetvalues)=0;
@@ -255,4 +273,5 @@
 		virtual IssmDouble GroundinglineMassFlux(bool scaled){_error_("not implemented");};
 		virtual void       InputDepthAverageAtBase(int enum_type,int average_enum_type)=0;
+		virtual void       DatasetInputExtrude(int input_enum,int start){_error_("not implemented yet");};
 		virtual void       InputExtrude(int input_enum,int start)=0;
 		virtual void       InputUpdateFromSolutionOneDofCollapsed(IssmDouble* solution,int inputenum)=0;
@@ -267,6 +286,5 @@
 		virtual bool       IsIcefront(void)=0;
 		virtual bool       IsNodeOnShelfFromFlags(IssmDouble* flags)=0;
-		virtual bool       IsOnBase()=0;
-		virtual bool       IsOnSurface()=0;
+
 		virtual bool       IsZeroLevelset(int levelset_enum)=0;
 		virtual void       JacobianDeterminant(IssmDouble*  Jdet, IssmDouble* xyz_list,Gauss* gauss)=0;
@@ -284,8 +302,8 @@
 		virtual Gauss*     NewGauss(void)=0;
 		virtual Gauss*     NewGauss(int order)=0;
-      virtual Gauss*     NewGauss(IssmDouble* xyz_list, IssmDouble* xyz_list_front,int order)=0;
-      virtual Gauss*     NewGauss(IssmDouble* xyz_list, IssmDouble* xyz_list_front,int order_horiz,int order_vert)=0;
-      virtual Gauss*     NewGauss(int point1,IssmDouble fraction1,IssmDouble fraction2,bool mainlyfloating,int order)=0;
-      virtual Gauss*     NewGauss(int point1,IssmDouble fraction1,IssmDouble fraction2,int order)=0;
+		virtual Gauss*     NewGauss(IssmDouble* xyz_list, IssmDouble* xyz_list_front,int order)=0;
+		virtual Gauss*     NewGauss(IssmDouble* xyz_list, IssmDouble* xyz_list_front,int order_horiz,int order_vert)=0;
+		virtual Gauss*     NewGauss(int point1,IssmDouble fraction1,IssmDouble fraction2,bool mainlyfloating,int order)=0;
+		virtual Gauss*     NewGauss(int point1,IssmDouble fraction1,IssmDouble fraction2,int order)=0;
 		virtual Gauss*     NewGaussBase(int order)=0;
 		virtual Gauss*     NewGaussLine(int vertex1,int vertex2,int order)=0;
@@ -313,4 +331,7 @@
 		virtual void       ResetHooks()=0;
 		virtual void       RignotMeltParameterization(void){_error_("not implemented yet");};
+		virtual void       SetElementInput(int enum_in,IssmDouble values){_error_("not implemented yet");};
+		virtual void       SetElementInput(Inputs2* inputs2,int enum_in,IssmDouble values){_error_("not implemented yet");};
+		virtual void       SetElementInput(Inputs2* inputs2,int numindices,int* indices,IssmDouble* values,int enum_in){_error_("not implemented yet");};
 		virtual void       SetControlInputsFromVector(IssmDouble* vector,int control_enum,int control_index,int offset,int N,int M)=0;
 		virtual void       SetControlInputsFromVector(IssmDouble* vector,int control_enum,int control_index)=0;
@@ -320,5 +341,5 @@
 		virtual Element*   SpawnTopElement(void)=0;
 		virtual IssmDouble StabilizationParameter(IssmDouble u, IssmDouble v, IssmDouble w, IssmDouble diameter, IssmDouble kappa)=0;
-		virtual void       StabilizationParameterAnisotropic(IssmDouble* tau_parameter_anisotropic, IssmDouble u, IssmDouble v, IssmDouble w, IssmDouble hx, IssmDouble hy, IssmDouble hz, IssmDouble kappa)=0;		
+		virtual void       StabilizationParameterAnisotropic(IssmDouble* tau_parameter_anisotropic, IssmDouble u, IssmDouble v, IssmDouble w, IssmDouble hx, IssmDouble hy, IssmDouble hz, IssmDouble kappa)=0;
 		virtual void	    StrainRateparallel(void)=0;
 		virtual void	    StrainRateperpendicular(void)=0;
@@ -332,5 +353,5 @@
 		virtual IssmDouble TotalGroundedBmb(bool scaled)=0;
 		virtual IssmDouble TotalSmb(bool scaled)=0;
-		virtual void       Update(int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finite_element)=0;
+		virtual void       Update(Inputs2* inputs2,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finite_element)=0;
 		virtual void       UpdateConstraintsExtrudeFromBase(void)=0;
 		virtual void       UpdateConstraintsExtrudeFromTop(void)=0;
@@ -342,5 +363,5 @@
 		virtual void       VerticalSegmentIndices(int** pindices,int* pnumseg)=0;
 		virtual void       VerticalSegmentIndicesBase(int** pindices,int* pnumseg)=0;
-		virtual void       ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input)=0;
+		virtual void       ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input){_error_("not implemented yet");};
 		virtual void       WriteFieldIsovalueSegment(DataSet* segments,int fieldenum,IssmDouble fieldvalue){_error_("not implemented yet");};
 
Index: /issm/trunk/src/c/classes/Elements/Elements.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Elements.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Elements.cpp	(revision 24686)
@@ -34,26 +34,14 @@
 
 /*Object management*/
-void Elements::Configure(Elements* elements,Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters){/*{{{*/
+void Elements::Configure(Elements* elements,Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters,Inputs2* inputs2){/*{{{*/
 
 	vector<Object*>::iterator object;
-	Element* element=NULL;
 
-	for ( object=objects.begin() ; object < objects.end(); object++ ){
-
-		element=xDynamicCast<Element*>((*object));
-		element->Configure(elements,loads,nodes,vertices,materials,parameters);
-
+	for(object=objects.begin() ; object < objects.end(); object++ ){
+		Element* element=xDynamicCast<Element*>((*object));
+		element->Configure(elements,loads,nodes,vertices,materials,parameters,inputs2);
 	}
 
-}
-/*}}}*/
-void Elements::InputDuplicate(int input_enum,int output_enum){/*{{{*/
-
-	for(int i=0;i<this->Size();i++){
-		Element* element=xDynamicCast<Element*>(this->GetObjectByOffset(i));
-		element->InputDuplicate(input_enum,output_enum);
-	}
-}
-/*}}}*/
+}/*}}}*/
 int  Elements::MaxNumNodes(void){/*{{{*/
 
Index: /issm/trunk/src/c/classes/Elements/Elements.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Elements.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Elements.h	(revision 24686)
@@ -24,6 +24,5 @@
 
 		/*numerics*/
-		void   Configure(Elements* elements,Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters);
-		void   InputDuplicate(int input_enum,int output_enum);
+		void   Configure(Elements* elements,Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters,Inputs2* inputs2);
 		int    MaxNumNodes(void);
 		int    NumberOfElements(void);
Index: /issm/trunk/src/c/classes/Elements/Penta.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Penta.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Penta.cpp	(revision 24686)
@@ -12,4 +12,8 @@
 
 #include "../classes.h"
+#include "../Inputs2/PentaInput2.h"
+#include "../Inputs2/ControlInput2.h"
+#include "../Inputs2/TransientInput2.h"
+#include "../Inputs2/DatasetInput2.h"
 #include "../../shared/shared.h"
 /*}}}*/
@@ -24,5 +28,5 @@
 }
 /*}}}*/
-Penta::Penta(int penta_id, int penta_sid,IoModel* iomodel,int nummodels)/*{{{*/
+Penta::Penta(int penta_id, int penta_sid,int penta_lid,IoModel* iomodel,int nummodels)/*{{{*/
 	:ElementHook(nummodels,penta_id,NUMVERTICES,iomodel){
 
@@ -36,4 +40,9 @@
 	this->id  = penta_id;
 	this->sid = penta_sid;
+	this->lid = penta_lid;
+
+	/*surface and base*/
+	this->isonsurface = false;
+	this->isonbase    = false;
 
 	/*Build neighbors list*/
@@ -47,7 +56,4 @@
 	this->parameters=NULL;
 
-	/*intialize inputs: */
-	this->inputs=new Inputs();
-
 	/*initialize pointers:*/
 	this->nodes             = NULL;
@@ -58,4 +64,18 @@
 	/*Only allocate pointer*/
 	this->element_type_list=xNew<int>(nummodels);
+
+	/*surface and base*/
+	_assert_(iomodel->Data("md.mesh.vertexonsurface"));
+	_assert_(iomodel->Data("md.mesh.vertexonbase"));
+	this->isonsurface = false;
+	this->isonbase    = false;
+	IssmDouble sum = 0.;
+	for(int i=0;i<NUMVERTICES;i++) sum += iomodel->Data("md.mesh.vertexonsurface")[reCast<int>(iomodel->elements[(penta_id-1)*NUMVERTICES+i])-1];
+	_assert_(sum>=0 && sum<4);
+	if(sum>2.5) this->isonsurface = true;
+	sum = 0.;
+	for(int i=0;i<NUMVERTICES;i++) sum += iomodel->Data("md.mesh.vertexonbase")[reCast<int>(iomodel->elements[(penta_id-1)*NUMVERTICES+i])-1];
+	_assert_(sum>=0 && sum<4);
+	if(sum>2.5) this->isonbase = true;
 }
 /*}}}*/
@@ -98,6 +118,7 @@
 	penta->id  = this->id;
 	penta->sid = this->sid;
-	if(this->inputs) penta->inputs = (Inputs*)(this->inputs->Copy());
-	else penta->inputs=new Inputs();
+	penta->lid = this->lid;
+	penta->isonbase  = this->isonbase;
+	penta->isonsurface  = this->isonsurface;
 
 	/*point parameters: */
@@ -123,4 +144,6 @@
 
 	MARSHALLING_ENUM(PentaEnum);
+	MARSHALLING(this->isonsurface);
+	MARSHALLING(this->isonbase);
 
 	/*Call parent classes: */
@@ -137,21 +160,18 @@
 
 /*Other*/
-void       Penta::AddBasalInput(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
-
-	_assert_(this->inputs);
+void       Penta::AddBasalInput2(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
+
+	_assert_(this->inputs2);
 	if(!IsOnBase()) return;
 	else{
-		if(interpolation_enum==P1Enum){
-			int        i;
+		if(interpolation_enum==P1Enum || interpolation_enum==P1DGEnum){
 			IssmDouble extrudedvalues[NUMVERTICES];
-			Penta*     penta=NULL;
-
-			for(i=0;i<NUMVERTICES2D;i++){
+			for(int i=0;i<NUMVERTICES2D;i++){
 				extrudedvalues[i]=values[i];
 				extrudedvalues[i+NUMVERTICES2D]=values[i];
 			}
-			penta=this;
+			Penta* penta=this;
 			for(;;){
-				penta->inputs->AddInput(new PentaInput(input_enum,&extrudedvalues[0],P1Enum));
+				penta->AddInput2(input_enum,&extrudedvalues[0],interpolation_enum);
 				if (penta->IsOnSurface()) break;
 				penta=penta->GetUpperPenta(); _assert_(penta->Id()!=this->id);
@@ -160,10 +180,74 @@
 		else _error_("not implemented yet");
 	}
-}
-/*}}}*/
-void       Penta::AddInput(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
-
-	_assert_(this->inputs);
-	this->inputs->AddInput(new PentaInput(input_enum,values,interpolation_enum));
+
+}
+/*}}}*/
+void       Penta::AddInput2(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
+
+	/**/
+	int vertexlids[NUMVERTICES];
+
+	/*Call inputs method*/
+	_assert_(this->inputs2);
+	switch(interpolation_enum){
+		case P1Enum:
+			for(int i=0;i<NUMVERTICES;i++) vertexlids[i]=this->vertices[i]->lid;
+			inputs2->SetPentaInput(input_enum,interpolation_enum,NUMVERTICES,vertexlids,values);
+			break;
+		case P1DGEnum:
+			inputs2->SetPentaInput(input_enum,interpolation_enum,this->lid,NUMVERTICES,values);
+			break;
+		default:
+			inputs2->SetPentaInput(input_enum,interpolation_enum,this->lid,this->GetNumberOfNodes(interpolation_enum),values);
+	}
+
+}
+/*}}}*/
+void       Penta::AddControlInput(int input_enum,Inputs2* inputs2,IoModel* iomodel,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max, int interpolation_enum,int id){/*{{{*/
+
+	/*Intermediaries*/
+	int vertexlids[NUMVERTICES];
+
+	_assert_(iomodel->elements);
+	for(int i=0;i<NUMVERTICES;i++){
+		int vertexid =reCast<int>(iomodel->elements[NUMVERTICES*this->Sid()+i]); //ids for vertices are in the elements array from Matlab
+		vertexlids[i]=iomodel->my_vertices_lids[vertexid-1];
+	}
+
+	/*Call inputs method*/
+	switch(interpolation_enum){
+		case P1Enum:
+			inputs2->SetPentaControlInput(input_enum,PentaInput2Enum,interpolation_enum,id,NUMVERTICES,vertexlids,values,values_min,values_max);
+			break;
+		default:
+			_error_("Cannot add \""<<EnumToStringx(input_enum)<<"\" interpolation "<<EnumToStringx(interpolation_enum)<<" not supported");
+	}
+
+}
+/*}}}*/
+void       Penta::DatasetInputCreate(IssmDouble* array,int M,int N,int* individual_enums,int num_inputs,Inputs2* inputs2,IoModel* iomodel,int input_enum){/*{{{*/
+
+	/*Intermediaries*/
+	int        vertexsids[NUMVERTICES];
+	int        vertexlids[NUMVERTICES];
+	IssmDouble nodeinputs[NUMVERTICES];
+
+	/*Some sanity checks*/
+	if(num_inputs<1)                 _error_("Cannot create a DatasetInput of size <1");
+	if(M!=iomodel->numberofvertices) _error_("Input size not supported yet");
+	if(N!=num_inputs)                _error_("Sizes are not consistent");
+
+	/*Get indices*/
+	_assert_(iomodel->elements);
+	for(int i=0;i<NUMVERTICES;i++){
+		vertexsids[i] = reCast<int>(iomodel->elements[NUMVERTICES*this->Sid()+i])-1;
+		vertexlids[i] = iomodel->my_vertices_lids[vertexsids[i]];
+	}
+
+	/*Create inputs and add to DataSetInput*/
+	for(int i=0;i<num_inputs;i++){
+		for(int j=0;j<NUMVERTICES;j++) nodeinputs[j]=array[vertexsids[j]*N+i];
+		inputs2->SetPentaDatasetInput(input_enum,individual_enums[i],P1Enum,NUMVERTICES,vertexlids,nodeinputs);
+	}
 }
 /*}}}*/
@@ -201,13 +285,13 @@
 
 	/*Retrieve all inputs and parameters we will need*/
-	Input* vx_input = inputs->GetInput(VxAverageEnum); _assert_(vx_input);
-	Input* vy_input = inputs->GetInput(VyAverageEnum); _assert_(vy_input);
-	Input* gr_input = inputs->GetInput(MaskGroundediceLevelsetEnum); _assert_(gr_input);
-	Input* bs_input = inputs->GetInput(BaseEnum);                    _assert_(bs_input);
-	Input* B_input  = inputs->GetInput(MaterialsRheologyBbarEnum);   _assert_(B_input);
-	Input* n_input  = inputs->GetInput(MaterialsRheologyNEnum);   _assert_(n_input);
-	Input* smax_fl_input = inputs->GetInput(CalvingStressThresholdFloatingiceEnum); _assert_(smax_fl_input);
-	Input* smax_gr_input = inputs->GetInput(CalvingStressThresholdGroundediceEnum); _assert_(smax_gr_input);
-	Input* sl_input  = inputs->GetInput(SealevelEnum); _assert_(sl_input);
+	Input2* vx_input = this->GetInput2(VxAverageEnum); _assert_(vx_input);
+	Input2* vy_input = this->GetInput2(VyAverageEnum); _assert_(vy_input);
+	Input2* gr_input = this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gr_input);
+	Input2* bs_input = this->GetInput2(BaseEnum);                    _assert_(bs_input);
+	Input2* B_input  = this->GetInput2(MaterialsRheologyBbarEnum);   _assert_(B_input);
+	Input2* n_input  = this->GetInput2(MaterialsRheologyNEnum);   _assert_(n_input);
+	Input2* smax_fl_input = this->GetInput2(CalvingStressThresholdFloatingiceEnum); _assert_(smax_fl_input);
+	Input2* smax_gr_input = this->GetInput2(CalvingStressThresholdGroundediceEnum); _assert_(smax_gr_input);
+	Input2* sl_input  = this->GetInput2(SealevelEnum); _assert_(sl_input);
 
 	/* Start looping on the number of vertices: */
@@ -263,8 +347,8 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new PentaInput(CalvingratexEnum,&calvingratex[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(CalvingrateyEnum,&calvingratey[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(CalvingCalvingrateEnum,&calvingrate[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(SigmaVMEnum,&sigma_vm[0],P1Enum));
+	this->AddBasalInput2(CalvingratexEnum,&calvingratex[0],P1DGEnum);
+	this->AddBasalInput2(CalvingrateyEnum,&calvingratey[0],P1DGEnum);
+	this->AddBasalInput2(CalvingCalvingrateEnum,&calvingrate[0],P1DGEnum);
+	this->AddBasalInput2(SigmaVMEnum,&sigma_vm[0],P1DGEnum);
 
 	this->InputExtrude(CalvingratexEnum,-1);
@@ -293,9 +377,9 @@
 
 	/*Retrieve all inputs and parameters we will need*/
-	Input* vx_input=inputs->GetInput(VxEnum);																		_assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);																		_assert_(vy_input);
-	Input* strainparallel_input=inputs->GetInput(StrainRateparallelEnum);								_assert_(strainparallel_input);
-	Input* strainperpendicular_input=inputs->GetInput(StrainRateperpendicularEnum);              _assert_(strainperpendicular_input);
-	Input* levermanncoeff_input=inputs->GetInput(CalvinglevermannCoeffEnum);                     _assert_(levermanncoeff_input);
+	Input2* vx_input=this->GetInput2(VxEnum);																		_assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);																		_assert_(vy_input);
+	Input2* strainparallel_input=this->GetInput2(StrainRateparallelEnum);								_assert_(strainparallel_input);
+	Input2* strainperpendicular_input=this->GetInput2(StrainRateperpendicularEnum);              _assert_(strainperpendicular_input);
+	Input2* levermanncoeff_input=this->GetInput2(CalvinglevermannCoeffEnum);                     _assert_(levermanncoeff_input);
 
 	/* Start looping on the number of vertices: */
@@ -322,7 +406,7 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new PentaInput(CalvingratexEnum,&calvingratex[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(CalvingrateyEnum,&calvingratey[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(CalvingCalvingrateEnum,&calvingrate[0],P1Enum));
+	this->AddBasalInput2(CalvingratexEnum,&calvingratex[0],P1DGEnum);
+	this->AddBasalInput2(CalvingrateyEnum,&calvingratey[0],P1DGEnum);
+	this->AddBasalInput2(CalvingCalvingrateEnum,&calvingrate[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -336,5 +420,5 @@
 	if(!IsIceInElement() || !IsZeroLevelset(MaskIceLevelsetEnum)){
 		IssmDouble flux_per_area=0;
-		this->inputs->AddInput(new PentaInput(CalvingFluxLevelsetEnum,&flux_per_area,P0Enum));
+		this->AddInput2(CalvingFluxLevelsetEnum,&flux_per_area,P0Enum);
 	}
 	else{
@@ -349,5 +433,5 @@
 
 		/*Recover parameters and values*/
-		GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+		Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 		/*Be sure that values are not zero*/
@@ -424,9 +508,9 @@
 		IssmDouble calvingratex,calvingratey,thickness,Jdet,flux_per_area;
 		IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-		Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-		Input* calvingratex_input=NULL;
-		Input* calvingratey_input=NULL;
-		calvingratex_input=inputs->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-		calvingratey_input=inputs->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
+		Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+		Input2* calvingratex_input=NULL;
+		Input2* calvingratey_input=NULL;
+		calvingratex_input=this->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+		calvingratey_input=this->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
 
 		/*Start looping on Gaussian points*/
@@ -446,5 +530,5 @@
 		}
 
-		this->inputs->AddInput(new PentaInput(CalvingFluxLevelsetEnum,&flux_per_area,P0Enum));
+		this->AddInput2(CalvingFluxLevelsetEnum,&flux_per_area,P0Enum);
 
 		/*Clean up and return*/
@@ -458,5 +542,5 @@
 	if(!IsIceInElement() || !IsZeroLevelset(MaskIceLevelsetEnum)){
 		IssmDouble flux_per_area=0;
-		this->inputs->AddInput(new PentaInput(CalvingMeltingFluxLevelsetEnum,&flux_per_area,P0Enum));
+		this->AddInput2(CalvingMeltingFluxLevelsetEnum,&flux_per_area,P0Enum);
 	}
 	else{
@@ -471,5 +555,5 @@
 
 		/*Recover parameters and values*/
-		GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+		Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 		/*Be sure that values are not zero*/
@@ -546,15 +630,15 @@
 		IssmDouble calvingratex,calvingratey,vx,vy,vel,meltingrate,meltingratex,meltingratey,thickness,Jdet,flux_per_area;
 		IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-		Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-		Input* calvingratex_input=NULL;
-		Input* calvingratey_input=NULL;
-		Input* vx_input=NULL;
-		Input* vy_input=NULL;
-		Input* meltingrate_input=NULL;
-		calvingratex_input=inputs->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-		calvingratey_input=inputs->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
-		vx_input=inputs->GetInput(VxEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyEnum); _assert_(vy_input);
-		meltingrate_input=inputs->GetInput(CalvingMeltingrateEnum); _assert_(meltingrate_input);
+		Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+		Input2* calvingratex_input=NULL;
+		Input2* calvingratey_input=NULL;
+		Input2* vx_input=NULL;
+		Input2* vy_input=NULL;
+		Input2* meltingrate_input=NULL;
+		calvingratex_input=this->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+		calvingratey_input=this->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
+		vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
+		meltingrate_input=this->GetInput2(CalvingMeltingrateEnum); _assert_(meltingrate_input);
 
 		/*Start looping on Gaussian points*/
@@ -580,5 +664,5 @@
 		}
 
-		this->inputs->AddInput(new PentaInput(CalvingMeltingFluxLevelsetEnum,&flux_per_area,P0Enum));
+		this->AddInput2(CalvingMeltingFluxLevelsetEnum,&flux_per_area,P0Enum);
 
 		/*Clean up and return*/
@@ -609,5 +693,5 @@
 	/*retrive parameters: */
 	parameters->FindParam(&analysis_type,AnalysisTypeEnum);
-	inputs->GetInputValue(&approximation,ApproximationEnum);
+	this->GetInput2Value(&approximation,ApproximationEnum);
 
 	/*Check analysis_types*/
@@ -633,8 +717,8 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* pressure_input=inputs->GetInput(PressureEnum); _assert_(pressure_input);
-	Input* vx_input=inputs->GetInput(VxEnum);             _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);             _assert_(vy_input);
-	Input* vz_input=inputs->GetInput(VzEnum);             _assert_(vz_input);
+	Input2* pressure_input=this->GetInput2(PressureEnum); _assert_(pressure_input);
+	Input2* vx_input=this->GetInput2(VxEnum);             _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);             _assert_(vy_input);
+	Input2* vz_input=this->GetInput2(VzEnum);             _assert_(vz_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -693,7 +777,7 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* vx_input=inputs->GetInput(VxEnum);             _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);             _assert_(vy_input);
-	Input* vz_input=inputs->GetInput(VzEnum);             _assert_(vz_input);
+	Input2* vx_input=this->GetInput2(VxEnum);             _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);             _assert_(vy_input);
+	Input2* vz_input=this->GetInput2(VzEnum);             _assert_(vz_input);
 
 	/* Start looping on the number of vertices: */
@@ -721,11 +805,11 @@
 
 	/*Add Stress tensor components into inputs*/
-	this->inputs->AddInput(new PentaInput(DeviatoricStressxxEnum,&tau_xx[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(DeviatoricStressxyEnum,&tau_xy[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(DeviatoricStressxzEnum,&tau_xz[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(DeviatoricStressyyEnum,&tau_yy[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(DeviatoricStressyzEnum,&tau_yz[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(DeviatoricStresszzEnum,&tau_zz[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(DeviatoricStresseffectiveEnum,&tau_eff[0],P1Enum));
+	this->AddInput2(DeviatoricStressxxEnum,&tau_xx[0],P1DGEnum);
+	this->AddInput2(DeviatoricStressxyEnum,&tau_xy[0],P1DGEnum);
+	this->AddInput2(DeviatoricStressxzEnum,&tau_xz[0],P1DGEnum);
+	this->AddInput2(DeviatoricStressyyEnum,&tau_yy[0],P1DGEnum);
+	this->AddInput2(DeviatoricStressyzEnum,&tau_yz[0],P1DGEnum);
+	this->AddInput2(DeviatoricStresszzEnum,&tau_zz[0],P1DGEnum);
+	this->AddInput2(DeviatoricStresseffectiveEnum,&tau_eff[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -750,8 +834,8 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* pressure_input=inputs->GetInput(PressureEnum); _assert_(pressure_input);
-	Input* vx_input=inputs->GetInput(VxEnum);             _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);             _assert_(vy_input);
-	Input* vz_input=inputs->GetInput(VzEnum);             _assert_(vz_input);
+	Input2* pressure_input=this->GetInput2(PressureEnum); _assert_(pressure_input);
+	Input2* vx_input=this->GetInput2(VxEnum);             _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);             _assert_(vy_input);
+	Input2* vz_input=this->GetInput2(VzEnum);             _assert_(vz_input);
 
 	/* Start looping on the number of vertices: */
@@ -775,10 +859,10 @@
 
 	/*Add Stress tensor components into inputs*/
-	this->inputs->AddInput(new PentaInput(StressTensorxxEnum,&sigma_xx[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(StressTensorxyEnum,&sigma_xy[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(StressTensorxzEnum,&sigma_xz[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(StressTensoryyEnum,&sigma_yy[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(StressTensoryzEnum,&sigma_yz[0],P1Enum));
-	this->inputs->AddInput(new PentaInput(StressTensorzzEnum,&sigma_zz[0],P1Enum));
+	this->AddInput2(StressTensorxxEnum,&sigma_xx[0],P1DGEnum);
+	this->AddInput2(StressTensorxyEnum,&sigma_xy[0],P1DGEnum);
+	this->AddInput2(StressTensorxzEnum,&sigma_xz[0],P1DGEnum);
+	this->AddInput2(StressTensoryyEnum,&sigma_yy[0],P1DGEnum);
+	this->AddInput2(StressTensoryzEnum,&sigma_yz[0],P1DGEnum);
+	this->AddInput2(StressTensorzzEnum,&sigma_zz[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -786,5 +870,5 @@
 }
 /*}}}*/
-void       Penta::Configure(Elements* elementsin, Loads* loadsin, Nodes* nodesin,Vertices* verticesin, Materials* materialsin, Parameters* parametersin){/*{{{*/
+void       Penta::Configure(Elements* elementsin, Loads* loadsin, Nodes* nodesin,Vertices* verticesin, Materials* materialsin, Parameters* parametersin,Inputs2* inputs2in){/*{{{*/
 
 	int analysis_counter;
@@ -812,32 +896,36 @@
 	/*point parameters to real dataset: */
 	this->parameters=parametersin;
-
-	/*get inputs configured too: */
-	this->inputs->Configure(parameters);
+	this->inputs2=inputs2in;
 }
 /*}}}*/
 void       Penta::ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index,int offset,int N,int M){/*{{{*/
 
+	if(enum_type==MaterialsRheologyBbarEnum) enum_type = MaterialsRheologyBEnum;
+	if(enum_type==DamageDbarEnum)            enum_type = DamageDEnum;
+
+	_error_("not implemented");
 	int    vertexpidlist[NUMVERTICES];
 	IssmDouble grad_list[NUMVERTICES];
-	Input* grad_input=NULL;
-	Input* input=NULL;
+	Input2* grad_input=NULL;
+	Input2* input=NULL;
 
 	if(enum_type==MaterialsRheologyBbarEnum){
-		input=(Input*)inputs->GetInput(MaterialsRheologyBEnum);
+		input=this->GetInput2(MaterialsRheologyBEnum);
 	}
 	else if(enum_type==DamageDbarEnum){
-		input=(Input*)inputs->GetInput(DamageDEnum);
+		input=this->GetInput2(DamageDEnum);
 	}
 	else{
-		input=inputs->GetInput(enum_type);
+		input=this->GetInput2(enum_type);
 	}
 	if (!input) _error_("Input " << EnumToStringx(enum_type) << " not found");
-	if (input->ObjectEnum()!=ControlInputEnum) _error_("Input " << EnumToStringx(enum_type) << " is not a ControlInput");
+	if(input->ObjectEnum()!=ControlInput2Enum) _error_("Input " << EnumToStringx(enum_type) << " is not a ControlInput");
 
 	GradientIndexing(&vertexpidlist[0],control_index);
-	for(int i=0;i<NUMVERTICES;i++) grad_list[i]=gradient[vertexpidlist[i]];
-	grad_input=new PentaInput(GradientEnum,grad_list,P1Enum);
-	((ControlInput*)input)->SetGradient(grad_input);
+
+	//for(int i=0;i<NUMVERTICES;i++) grad_list[i]=gradient[vertexpidlist[i]];
+	//grad_input=new PentaInput(GradientEnum,grad_list,P1Enum);
+	//((ControlInput*)input)->SetGradient(grad_input);
+	_error_("not implemented");
 
 }
@@ -845,44 +933,21 @@
 void       Penta::ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index){/*{{{*/
 
-	int    vertexpidlist[NUMVERTICES];
+	int        idlist[NUMVERTICES];
+	int        vertexlids[NUMVERTICES];
 	IssmDouble grad_list[NUMVERTICES];
-	Input* grad_input=NULL;
-	Input* input=NULL;
-
-	if(enum_type==MaterialsRheologyBbarEnum){
-		input=(Input*)inputs->GetInput(MaterialsRheologyBEnum);
-	}
-	else if(enum_type==DamageDbarEnum){
-		input=(Input*)inputs->GetInput(DamageDEnum);
-	}
-	else{
-		input=inputs->GetInput(enum_type);
-	}
-	if (!input) _error_("Input " << EnumToStringx(enum_type) << " not found");
-	if (input->ObjectEnum()!=ControlInputEnum) _error_("Input " << EnumToStringx(enum_type) << " is not a ControlInput");
-
-	GradientIndexing(&vertexpidlist[0],control_index);
-	for(int i=0;i<NUMVERTICES;i++) grad_list[i]=gradient[vertexpidlist[i]];
-	grad_input=new PentaInput(GradientEnum,grad_list,P1Enum);
-	((ControlInput*)input)->SetGradient(grad_input);
-
+
+	if(enum_type==MaterialsRheologyBbarEnum) enum_type = MaterialsRheologyBEnum;
+	if(enum_type==DamageDbarEnum)            enum_type = DamageDEnum;
+
+	GradientIndexing(&idlist[0],control_index);
+	for(int i=0;i<NUMVERTICES;i++) grad_list[i]=gradient[idlist[i]];
+	for(int i=0;i<NUMVERTICES;i++) vertexlids[i]=this->vertices[i]->lid;
+
+	this->inputs2->SetTriaControlInputGradient(enum_type,P1Enum,NUMVERTICES,&vertexlids[0],&grad_list[0]);
 }/*}}}*/
 void       Penta::ControlToVectors(Vector<IssmPDouble>* vector_control, Vector<IssmPDouble>* vector_gradient,int control_enum){/*{{{*/
 
-	Input* input=NULL;
-
-	if(control_enum==MaterialsRheologyBbarEnum){
-		input=(Input*)inputs->GetInput(MaterialsRheologyBEnum);
-	}
-	else if(control_enum==DamageDbarEnum){
-		input=(Input*)inputs->GetInput(DamageDEnum);
-	}
-	else{
-		input=inputs->GetInput(control_enum);
-	}
-	if (!input) _error_("Input " << EnumToStringx(control_enum) << " not found");
-	if (input->ObjectEnum()!=ControlInputEnum) _error_("Input " << EnumToStringx(control_enum) << " is not a ControlInput");
-
 	int         sidlist[NUMVERTICES];
+	int         lidlist[NUMVERTICES];
 	int         connectivity[NUMVERTICES];
 	IssmPDouble values[NUMVERTICES];
@@ -890,6 +955,15 @@
 	IssmDouble  value,gradient;
 
+	if(control_enum==MaterialsRheologyBbarEnum) control_enum = MaterialsRheologyBEnum;
+	if(control_enum==DamageDbarEnum)            control_enum = DamageDEnum;
+
 	this->GetVerticesConnectivityList(&connectivity[0]);
 	this->GetVerticesSidList(&sidlist[0]);
+	this->GetVerticesLidList(&lidlist[0]);
+
+	ElementInput2* control_value    = this->inputs2->GetControlInput2Data(control_enum,"value");    _assert_(control_value);
+	ElementInput2* control_gradient = this->inputs2->GetControlInput2Data(control_enum,"gradient"); _assert_(control_gradient);
+	control_value->Serve(NUMVERTICES,&lidlist[0]);
+	control_gradient->Serve(NUMVERTICES,&lidlist[0]);
 
 	GaussPenta* gauss=new GaussPenta();
@@ -897,6 +971,6 @@
 		gauss->GaussVertex(iv);
 
-		((ControlInput*)input)->GetInputValue(&value,gauss);
-		((ControlInput*)input)->GetGradientValue(&gradient,gauss);
+		control_value->GetInputValue(&value,gauss);
+		control_gradient->GetInputValue(&gradient,gauss);
 
 		values[iv]    = reCast<IssmPDouble>(value)/reCast<IssmPDouble>(connectivity[iv]);
@@ -907,5 +981,4 @@
 	vector_control->SetValues(NUMVERTICES,&sidlist[0],&values[0],ADD_VAL);
 	vector_gradient->SetValues(NUMVERTICES,&sidlist[0],&gradients[0],ADD_VAL);
-
 }/*}}}*/
 void       Penta::CreateDistanceInputFromSegmentlist(IssmDouble* distances,int distanceenum){/*{{{*/
@@ -913,5 +986,5 @@
 	/*Get current field and vertex coordinates*/
 	IssmDouble ls[NUMVERTICES],distance;
-	GetInputListOnVertices(&ls[0],distanceenum);
+	Element::GetInputListOnVertices(&ls[0],distanceenum);
 
 	/*Get distance from list of segments and reset ls*/
@@ -932,5 +1005,5 @@
 
 	/*Update Levelset*/
-	this->inputs->AddInput(new PentaInput(distanceenum,&ls[0],P1Enum));
+	this->AddInput2(distanceenum,&ls[0],P1Enum);
 }
 /*}}}*/
@@ -949,7 +1022,5 @@
 				/*Get input:*/
 				IssmDouble vel;
-				Input* vel_input;
-
-				vel_input=this->inputs->GetInput(VelEnum); _assert_(vel_input);
+				Input2* vel_input=this->GetInput2(VelEnum); _assert_(vel_input);
 				vel_input->GetInputAverage(&vel);
 
@@ -1014,5 +1085,5 @@
 
 	if(scaled==true){
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);
 		floatingarea=floatingarea*scalefactor;
@@ -1028,5 +1099,5 @@
 
 	int approximation;
-	inputs->GetInputValue(&approximation,ApproximationEnum);
+	this->GetInput2Value(&approximation,ApproximationEnum);
 	if(approximation==HOApproximationEnum || approximation==SSAApproximationEnum || approximation==SSAHOApproximationEnum || approximation==HOFSApproximationEnum){
 		_error_("Cannot compute contact condition for non FS elements");
@@ -1040,9 +1111,9 @@
 	IssmDouble  sigmayz[NUMVERTICES],sigmaxz[NUMVERTICES],sigma_nn[NUMVERTICES];
 	IssmDouble  viscosity,epsilon[NUMVERTICES];
-	GetInputListOnVertices(&base[0],BaseEnum);
-	GetInputListOnVertices(&bed[0],BedEnum);
-	GetInputListOnVertices(&surface[0],SurfaceEnum);
-	GetInputListOnVertices(&pressure[0],PressureEnum);
-	GetInputListOnVertices(&phi[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&base[0],BaseEnum);
+	Element::GetInputListOnVertices(&bed[0],BedEnum);
+	Element::GetInputListOnVertices(&surface[0],SurfaceEnum);
+	Element::GetInputListOnVertices(&pressure[0],PressureEnum);
+	Element::GetInputListOnVertices(&phi[0],MaskGroundediceLevelsetEnum);
 	IssmDouble rho_ice   = FindParam(MaterialsRhoIceEnum);
 	IssmDouble rho_water = FindParam(MaterialsRhoSeawaterEnum);
@@ -1053,7 +1124,7 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* vx_input = inputs->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input = inputs->GetInput(VyEnum); _assert_(vy_input);
-	Input* vz_input = inputs->GetInput(VzEnum); _assert_(vz_input);
+	Input2* vx_input = this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = this->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input = this->GetInput2(VzEnum); _assert_(vz_input);
 
 	/*1. Recover stresses at the base*/
@@ -1183,5 +1254,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -1237,5 +1308,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -1343,8 +1414,8 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&bed[0],BedEnum);
-	GetInputListOnVertices(&surfaces[0],SurfaceEnum);
-	GetInputListOnVertices(&bases[0],BaseEnum);
-	GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&bed[0],BedEnum);
+	Element::GetInputListOnVertices(&surfaces[0],SurfaceEnum);
+	Element::GetInputListOnVertices(&bases[0],BaseEnum);
+	Element::GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
 
 	nrfrontbed=0;
@@ -1438,5 +1509,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&levelset[0],levelsetenum);
+	Element::GetInputListOnVertices(&levelset[0],levelsetenum);
 
 	int* indicesfront = xNew<int>(NUMVERTICES);
@@ -1472,7 +1543,129 @@
 	xDelete<int>(indicesfront);
 }/*}}}*/
+Input2*    Penta::GetInput2(int inputenum){/*{{{*/
+
+	/*Get Input from dataset*/
+	PentaInput2* input = this->inputs2->GetPentaInput(inputenum);
+	if(!input) return input;
+
+	/*Intermediaries*/
+	int numindices;
+	int indices[30]; /*Max numnodes*/
+
+	/*Check interpolation*/
+	int interpolation = input->GetInterpolation();
+	if(interpolation==P1Enum){
+		numindices = 6;
+		for(int i=0;i<6;i++) indices[i] = vertices[i]->lid;
+		input->Serve(numindices,&indices[0]);
+	}
+	else{
+		input->Serve(this->lid,this->GetNumberOfNodes(interpolation));
+	}
+
+	/*Tell input it is NOT collapsed*/
+	//input->SetServeCollapsed(0); FIXME: not needed?
+
+	/*Return*/
+	return input;
+}/*}}}*/
+Input2*    Penta::GetInput2(int inputenum,IssmDouble time){/*{{{*/
+
+	/*Get Input from dataset*/
+	PentaInput2* input = this->inputs2->GetPentaInput(inputenum,time);
+	if(!input) return input;
+
+	/*Intermediaries*/
+	int numindices;
+	int indices[30]; /*Max numnodes*/
+
+	/*Check interpolation*/
+	int interpolation = input->GetInterpolation();
+	if(interpolation==P1Enum){
+		numindices = 6;
+		for(int i=0;i<6;i++) indices[i] = vertices[i]->lid;
+		input->Serve(numindices,&indices[0]);
+	}
+	else{
+		input->Serve(this->lid,this->GetNumberOfNodes(interpolation));
+	}
+
+	/*Tell input it is NOT collapsed*/
+	//input->SetServeCollapsed(0); FIXME: not needed?
+
+	/*Return*/
+	return input;
+}/*}}}*/
+void       Penta::GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value){/*{{{*/
+
+	/*Checks in debugging mode*/
+	_assert_(pvalue);
+
+	/* Start looping on the number of vertices: */
+	if(input){
+		GaussPenta gauss;
+		for(int iv=0;iv<NUMVERTICES;iv++){
+			gauss.GaussVertex(iv);
+			input->GetInputValue(&pvalue[iv],&gauss);
+		}
+	}
+	else{
+		for(int iv=0;iv<NUMVERTICES;iv++) pvalue[iv] = default_value;
+	}
+}
+/*}}}*/
+void       Penta::GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value){/*{{{*/
+
+	/*Checks in debugging mode*/
+	_assert_(pvalue);
+
+	/*What type of finite element are we dealing with?*/
+	int fe       = this->FiniteElement();
+	int numnodes = this->GetNumberOfNodes();
+
+	/* Start looping on the number of vertices: */
+	if(input){
+		GaussPenta gauss;
+		for(int iv=0;iv<numnodes;iv++){
+			gauss.GaussNode(fe,iv);
+			input->GetInputValue(&pvalue[iv],&gauss);
+		}
+	}
+	else{
+		for(int iv=0;iv<numnodes;iv++) pvalue[iv] = default_value;
+	}
+}
+/*}}}*/
+DatasetInput2* Penta::GetDatasetInput2(int inputenum){/*{{{*/
+
+	DatasetInput2* datasetinput = this->inputs2->GetDatasetInput2(inputenum);
+	if(!datasetinput) return NULL;
+
+	for(int i=0;i<datasetinput->GetNumIds();i++){
+
+		PentaInput2* input = datasetinput->GetPentaInputByOffset(i); _assert_(input);
+
+		/*Intermediaries*/
+		int numindices;
+		int indices[30]; /*Max numnodes*/
+
+		/*Check interpolation*/
+		int interpolation = input->GetInterpolation();
+		if(interpolation==P1Enum){
+			numindices = 6;
+			for(int i=0;i<6;i++) indices[i] = vertices[i]->lid;
+			input->Serve(numindices,&indices[0]);
+		}
+		else{
+			input->Serve(this->lid,this->GetNumberOfNodes(interpolation));
+		}
+
+	}
+
+	return datasetinput;
+}/*}}}*/
 void       Penta::GetInputValue(IssmDouble* pvalue,Node* node,int enumtype){/*{{{*/
 
-	Input* input=inputs->GetInput(enumtype);
+	Input2* input=this->GetInput2(enumtype);
 	if(!input) _error_("No input of type " << EnumToStringx(enumtype) << " found in tria");
 
@@ -1488,5 +1681,5 @@
 void       Penta::GetInputValue(IssmDouble* pvalue,Vertex* vertex,int enumtype){/*{{{*/
 
-	Input* input=inputs->GetInput(enumtype);
+	Input2* input=this->GetInput2(enumtype);
 	if(!input) _error_("No input of type " << EnumToStringx(enumtype) << " found in tria");
 
@@ -1523,5 +1716,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&lsf[0],levelset_enum);
+	Element::GetInputListOnVertices(&lsf[0],levelset_enum);
 
 	/* Determine distribution of ice over element.
@@ -1617,23 +1810,40 @@
 void       Penta::GetVectorFromControlInputs(Vector<IssmDouble>* vector,int control_enum,int control_index,const char* data){/*{{{*/
 
-	int vertexidlist[NUMVERTICES];
-
 	/*Get out if this is not an element input*/
 	if(!IsInputEnum(control_enum)) _error_("Enum "<<EnumToStringx(control_enum)<<" is not in IsInput");
 
 	/*Prepare index list*/
-	GradientIndexing(&vertexidlist[0],control_index);
+	int idlist[NUMVERTICES];
+	GradientIndexing(&idlist[0],control_index);
 
 	/*Get input (either in element or material)*/
 	if(control_enum==MaterialsRheologyBbarEnum) control_enum=MaterialsRheologyBEnum;
-	Input* input=inputs->GetInput(control_enum);
-	if(!input) _error_("Input " << EnumToStringx(control_enum) << " not found in element");
-
-	/*Check that it is a ControlInput*/
-	if (input->ObjectEnum()!=ControlInputEnum){
-		_error_("input " << EnumToStringx(control_enum) << " is not a ControlInput");
-	}
-
-	((ControlInput*)input)->GetVectorFromInputs(vector,&vertexidlist[0],data);
+	ElementInput2* input=this->inputs2->GetControlInput2Data(control_enum,data);   _assert_(input);
+
+	/*Intermediaries*/
+	int numindices;
+	int indices[NUMVERTICES];
+
+	/*Check interpolation*/
+	int interpolation = input->GetInterpolation();
+	switch(interpolation){
+		case P1Enum:
+			numindices = NUMVERTICES;
+			for(int i=0;i<NUMVERTICES;i++) indices[i] = vertices[i]->lid;
+			input->Serve(numindices,&indices[0]);
+			break;
+		default: _error_("interpolation "<<EnumToStringx(interpolation)<<" not supported");
+	}
+
+	/* Start looping on the number of vertices: */
+	IssmDouble values[NUMVERTICES];
+	Gauss*gauss=this->NewGauss();
+	for(int iv=0;iv<NUMVERTICES;iv++){
+		gauss->GaussVertex(iv);
+		input->GetInputValue(&values[iv],gauss);
+	}
+	delete gauss;
+
+	vector->SetValues(NUMVERTICES,idlist,&values[0],INS_VAL);
 }
 /*}}}*/
@@ -1642,36 +1852,14 @@
 	int* idlist = NULL;
 	IssmDouble* values = NULL;
-	int* M = NULL;
-
-	/*Get out if this is not an element input*/
-	if(!IsInputEnum(control_enum)) _error_("Enum "<<EnumToStringx(control_enum)<<" is not in IsInput");
-	Input* input=(Input*)this->inputs->GetInput(control_enum);   _assert_(input);
-
-	/*Cast to Controlinput*/
-	if(input->ObjectEnum()!=ControlInputEnum) _error_("input " << EnumToStringx(control_enum) << " is not a ControlInput");
-	ControlInput* controlinput = xDynamicCast<ControlInput*>(input);
-
-	if(strcmp(data,"value")==0){
-		input  = controlinput->values;
-	}
-	else if (strcmp(data,"lowerbound")==0){
-		input = controlinput->minvalues;
-	}
-	else if (strcmp(data,"upperbound")==0){
-		input = controlinput->maxvalues;
-	}
-	else if (strcmp(data,"gradient")==0){
-		input = controlinput->gradient;
-	}
-	else{
-		_error_("Data " << data << " not supported yet");
-	}
+
+	/*Get input*/
+	ElementInput2* input=this->inputs2->GetControlInput2Data(control_enum,data);   _assert_(input);
+
 	/*Check what input we are dealing with*/
-
 	switch(input->ObjectEnum()){
-		case PentaInputEnum:
+		case PentaInput2Enum:
 				  {
-					PentaInput* pentainput = xDynamicCast<PentaInput*>(input);
-					if(pentainput->interpolation_type!=P1Enum) _error_("not supported yet");
+					PentaInput2* pentainput = xDynamicCast<PentaInput2*>(input);
+					if(pentainput->GetInputInterpolationType()!=P1Enum) _error_("not supported yet");
 
 					/*Create list of indices and values for global vector*/
@@ -1680,5 +1868,5 @@
 					GradientIndexing(&idlist[0],control_index);
 					for(int i=0;i<NUMVERTICES;i++){
-						values[i] = pentainput->values[i];
+						values[i] = pentainput->element_values[i];
 					}
 					vector->SetValues(NUMVERTICES,idlist,values,INS_VAL);
@@ -1688,22 +1876,5 @@
 				case TransientInputEnum:
 				  {
-					parameters->FindParam(&M,NULL,ControlInputSizeMEnum);
-					TransientInput* transientinput = xDynamicCast<TransientInput*>(input);
-					int N = transientinput->numtimesteps;
-					idlist = xNew<int>(NUMVERTICES*N);
-					values = xNew<IssmDouble>(NUMVERTICES*N);
-					for(int t=0;t<transientinput->numtimesteps;t++) {
-						IssmDouble time = transientinput->GetTimeByOffset(t);
-						input = transientinput->GetTimeInput(time);
-						TriaInput* timeinput = xDynamicCast<TriaInput*>(input);
-						if(timeinput->interpolation_type!=P1Enum) _error_("not supported yet");
-						/*Create list of indices and values for global vector*/
-						for(int i=0;i<NUMVERTICES;i++){
-							idlist[N*i+t] = offset + this->vertices[i]->Sid()+t*M[control_index];
-							values[N*i+t] = timeinput->values[i];
-						}
-					}
-
-					vector->SetValues(NUMVERTICES*transientinput->numtimesteps,idlist,values,INS_VAL);
+					_error_("not implemented (see Tria)");
 					break;
 				  }
@@ -1753,5 +1924,5 @@
 
 	if(scaled==true){
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);
 		groundedarea=groundedarea*scalefactor;
@@ -1761,8 +1932,10 @@
 }
 /*}}}*/
-IssmDouble Penta::IcefrontMassFluxLevelset(bool scaled){/*{{{*/
-
-	/*Make sure there is an ice front here*/
-	if(!IsIceInElement() || !IsZeroLevelset(MaskIceLevelsetEnum) || !IsOnBase()) return 0;
+IssmDouble Penta::GroundinglineMassFlux(bool scaled){/*{{{*/
+
+	/*Make sure there is a grounding line here*/
+	if(!IsOnBase()) return 0;
+	if(!IsIceInElement()) return 0;
+	if(!IsZeroLevelset(MaskGroundediceLevelsetEnum)) return 0;
 
 	/*Scaled not implemented yet...*/
@@ -1779,5 +1952,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -1857,9 +2030,7 @@
 	IssmDouble vx,vy,thickness,Jdet;
 	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* vx_input=NULL;
-	Input* vy_input=NULL;
-	vx_input=inputs->GetInput(VxAverageEnum); _assert_(vx_input);
-	vy_input=inputs->GetInput(VyAverageEnum); _assert_(vy_input);
+	Input2 *thickness_input = this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2 *vx_input        = this->GetInput2(VxAverageEnum); _assert_(vx_input);
+	Input2 *vy_input        = this->GetInput2(VyAverageEnum); _assert_(vy_input);
 
 	/*Start looping on Gaussian points*/
@@ -1876,5 +2047,129 @@
 	}
 
+	/*Clean up and return*/
+	delete gauss;
 	return flux;
+
+}
+/*}}}*/
+IssmDouble Penta::IcefrontMassFluxLevelset(bool scaled){/*{{{*/
+
+	/*Make sure there is an ice front here*/
+	if(!IsIceInElement() || !IsZeroLevelset(MaskIceLevelsetEnum) || !IsOnBase()) return 0;
+
+	/*Scaled not implemented yet...*/
+	_assert_(!scaled);
+
+	int               domaintype,index1,index2;
+	const IssmPDouble epsilon = 1.e-15;
+	IssmDouble        s1,s2;
+	IssmDouble        gl[NUMVERTICES];
+	IssmDouble        xyz_front[2][3];
+
+	IssmDouble *xyz_list = NULL;
+	this->GetVerticesCoordinates(&xyz_list);
+
+	/*Recover parameters and values*/
+	Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+
+	/*Be sure that values are not zero*/
+	if(gl[0]==0.) gl[0]=gl[0]+epsilon;
+	if(gl[1]==0.) gl[1]=gl[1]+epsilon;
+	if(gl[2]==0.) gl[2]=gl[2]+epsilon;
+
+	int pt1 = 0;
+	int pt2 = 1;
+	if(gl[0]*gl[1]>0){ //Nodes 0 and 1 are similar, so points must be found on segment 0-2 and 1-2
+
+		/*Portion of the segments*/
+		s1=gl[2]/(gl[2]-gl[1]);
+		s2=gl[2]/(gl[2]-gl[0]);
+		if(gl[2]<0.){
+			pt1 = 1; pt2 = 0;
+		}
+		xyz_front[pt2][0]=xyz_list[3*2+0]+s1*(xyz_list[3*1+0]-xyz_list[3*2+0]);
+		xyz_front[pt2][1]=xyz_list[3*2+1]+s1*(xyz_list[3*1+1]-xyz_list[3*2+1]);
+		xyz_front[pt2][2]=xyz_list[3*2+2]+s1*(xyz_list[3*1+2]-xyz_list[3*2+2]);
+		xyz_front[pt1][0]=xyz_list[3*2+0]+s2*(xyz_list[3*0+0]-xyz_list[3*2+0]);
+		xyz_front[pt1][1]=xyz_list[3*2+1]+s2*(xyz_list[3*0+1]-xyz_list[3*2+1]);
+		xyz_front[pt1][2]=xyz_list[3*2+2]+s2*(xyz_list[3*0+2]-xyz_list[3*2+2]);
+	}
+	else if(gl[1]*gl[2]>0){ //Nodes 1 and 2 are similar, so points must be found on segment 0-1 and 0-2
+
+		/*Portion of the segments*/
+		s1=gl[0]/(gl[0]-gl[1]);
+		s2=gl[0]/(gl[0]-gl[2]);
+		if(gl[0]<0.){
+			pt1 = 1; pt2 = 0;
+		}
+
+		xyz_front[pt1][0]=xyz_list[3*0+0]+s1*(xyz_list[3*1+0]-xyz_list[3*0+0]);
+		xyz_front[pt1][1]=xyz_list[3*0+1]+s1*(xyz_list[3*1+1]-xyz_list[3*0+1]);
+		xyz_front[pt1][2]=xyz_list[3*0+2]+s1*(xyz_list[3*1+2]-xyz_list[3*0+2]);
+		xyz_front[pt2][0]=xyz_list[3*0+0]+s2*(xyz_list[3*2+0]-xyz_list[3*0+0]);
+		xyz_front[pt2][1]=xyz_list[3*0+1]+s2*(xyz_list[3*2+1]-xyz_list[3*0+1]);
+		xyz_front[pt2][2]=xyz_list[3*0+2]+s2*(xyz_list[3*2+2]-xyz_list[3*0+2]);
+	}
+	else if(gl[0]*gl[2]>0){ //Nodes 0 and 2 are similar, so points must be found on segment 1-0 and 1-2
+
+		/*Portion of the segments*/
+		s1=gl[1]/(gl[1]-gl[0]);
+		s2=gl[1]/(gl[1]-gl[2]);
+		if(gl[1]<0.){
+			pt1 = 1; pt2 = 0;
+		}
+
+		xyz_front[pt2][0]=xyz_list[3*1+0]+s1*(xyz_list[3*0+0]-xyz_list[3*1+0]);
+		xyz_front[pt2][1]=xyz_list[3*1+1]+s1*(xyz_list[3*0+1]-xyz_list[3*1+1]);
+		xyz_front[pt2][2]=xyz_list[3*1+2]+s1*(xyz_list[3*0+2]-xyz_list[3*1+2]);
+		xyz_front[pt1][0]=xyz_list[3*1+0]+s2*(xyz_list[3*2+0]-xyz_list[3*1+0]);
+		xyz_front[pt1][1]=xyz_list[3*1+1]+s2*(xyz_list[3*2+1]-xyz_list[3*1+1]);
+		xyz_front[pt1][2]=xyz_list[3*1+2]+s2*(xyz_list[3*2+2]-xyz_list[3*1+2]);
+	}
+	else{
+		_error_("case not possible");
+	}
+
+
+	/*Some checks in debugging mode*/
+	_assert_(s1>=0 && s1<=1.);
+	_assert_(s2>=0 && s2<=1.);
+
+	/*Get normal vector*/
+	IssmDouble normal[3];
+	this->NormalSectionBase(&normal[0],&xyz_front[0][0]);
+	normal[0] = -normal[0];
+	normal[1] = -normal[1];
+
+	this->InputDepthAverageAtBase(VxEnum,VxAverageEnum);
+	this->InputDepthAverageAtBase(VyEnum,VyAverageEnum);
+
+	/*Get inputs*/
+	IssmDouble flux = 0.;
+	IssmDouble vx,vy,thickness,Jdet;
+	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* vx_input=NULL;
+	Input2* vy_input=NULL;
+	vx_input=this->GetInput2(VxAverageEnum); _assert_(vx_input);
+	vy_input=this->GetInput2(VyAverageEnum); _assert_(vy_input);
+
+	/*Start looping on Gaussian points*/
+	Gauss* gauss=this->NewGaussBase(xyz_list,&xyz_front[0][0],3);
+	for(int ig=gauss->begin();ig<gauss->end();ig++){
+
+		gauss->GaussPoint(ig);
+		thickness_input->GetInputValue(&thickness,gauss);
+		vx_input->GetInputValue(&vx,gauss);
+		vy_input->GetInputValue(&vy,gauss);
+		this->JacobianDeterminantLine(&Jdet,&xyz_front[0][0],gauss);
+
+		flux += rho_ice*Jdet*gauss->weight*thickness*(vx*normal[0] + vy*normal[1]);
+	}
+
+	/*Clean up and return*/
+	delete gauss;
+	return flux;
+	
 }
 /*}}}*/
@@ -1895,5 +2190,5 @@
 
 	if(scaled==true){ //scale for area projection correction
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);
 		base=base*scalefactor;
@@ -1925,5 +2220,5 @@
 	base = 1./2.*fabs((xyz_list[0][0]-xyz_list[2][0])*(xyz_list[1][1]-xyz_list[0][1]) - (xyz_list[0][0]-xyz_list[1][0])*(xyz_list[2][1]-xyz_list[0][1]));
 	if(scaled==true){
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);
 		base=base*scalefactor;
@@ -1931,7 +2226,7 @@
 
 	/*Now get the average height above floatation*/
-	Input* surface_input    = inputs->GetInput(SurfaceEnum);    _assert_(surface_input);
-	Input* base_input        = inputs->GetInput(BaseEnum);        _assert_(base_input);
-	Input* bed_input = inputs->GetInput(BedEnum); _assert_(bed_input);
+	Input2* surface_input    = this->GetInput2(SurfaceEnum);    _assert_(surface_input);
+	Input2* base_input        = this->GetInput2(BaseEnum);        _assert_(base_input);
+	Input2* bed_input = this->GetInput2(BedEnum); _assert_(bed_input);
 	if(!bed_input) _error_("Could not find bed");
 	surface_input->GetInputAverage(&surface);
@@ -1949,7 +2244,8 @@
 	IssmDouble  xyz_list_line[2][3];
 	IssmDouble  total[NUMVERTICES]       = {0.};
+	int         lidlist[NUMVERTICES];
 	IssmDouble  intz[NUMVERTICES]        = {0.};
-	Input      *original_input           = NULL;
-	Input      *depth_averaged_input     = NULL;
+	Input2     *original_input           = NULL;
+	Input2     *depth_averaged_input     = NULL;
 
 	/*Are we on the base? If not, return*/
@@ -1959,8 +2255,11 @@
 	Penta* penta = this;
 	int    step  = 0;
+	Gauss* gauss[3];
+	for(int iv=0;iv<3;iv++) gauss[iv] = penta->NewGaussLine(iv,iv+3,3);
+
 	for(;;){
 
 		/*Step1: Get original input (to be depth-avegaged): */
-		original_input=(Input*)penta->inputs->GetInput(original_enum);
+		original_input=penta->GetInput2(original_enum);
 		if(!original_input) _error_("could not find input with enum " << EnumToStringx(original_enum));
 
@@ -1974,13 +2273,11 @@
 			}
 			/*Integrate over edge*/
-			Gauss* gauss=penta->NewGaussLine(iv,iv+3,3);
-			for(int ig=gauss->begin();ig<gauss->end();ig++){
-				gauss->GaussPoint(ig);
-				penta->JacobianDeterminantLine(&Jdet,&xyz_list_line[0][0],gauss);
-				original_input->GetInputValue(&value,gauss);
-				total[iv] += value*Jdet*gauss->weight;
-				intz[iv]  += Jdet*gauss->weight;
+			for(int ig=gauss[iv]->begin();ig<gauss[iv]->end();ig++){
+				gauss[iv]->GaussPoint(ig);
+				penta->JacobianDeterminantLine(&Jdet,&xyz_list_line[0][0],gauss[iv]);
+				original_input->GetInputValue(&value,gauss[iv]);
+				total[iv] += value*Jdet*gauss[iv]->weight;
+				intz[iv]  += Jdet*gauss[iv]->weight;
 			}
-			delete gauss;
 		}
 
@@ -1992,4 +2289,5 @@
 		step++;
 	}
+	for(int iv=0;iv<3;iv++) delete gauss[iv];
 
 	/*Now we only need to divide the depth integrated input by the total thickness!*/
@@ -1998,20 +2296,20 @@
 		total[iv+3] = total[iv];
 	}
+	GetVerticesLidList(&lidlist[0]);
 	switch(original_input->ObjectEnum()){
 		case PentaInputEnum:
+		case PentaInput2Enum:
 		case ControlInputEnum:
-			depth_averaged_input=new PentaInput(average_enum,&total[0],P1Enum);
+			this->inputs2->SetPentaInput(average_enum,P1Enum,NUMVERTICES,lidlist,&total[0]);
 			break;
 		default:
 			_error_("Interpolation " << EnumToStringx(original_input->ObjectEnum()) << " not supported yet");
 	}
-
-	/*Finally, add to inputs*/
-	this->inputs->AddInput((Input*)depth_averaged_input);
-}
-/*}}}*/
-void       Penta::InputExtrude(int enum_type,int start){/*{{{*/
+}
+/*}}}*/
+void       Penta::DatasetInputExtrude(int enum_type,int start){/*{{{*/
 
 	_assert_(start==-1 || start==+1);
+	_assert_(this->inputs2);
 
 	/*Are we on the the boundary we want to be?*/
@@ -2019,28 +2317,180 @@
 	if(start==+1 && !IsOnSurface()) return;
 
-	/*Step1: Get and Extrude original input: */
-	Input* base_input=(Input*)this->inputs->GetInput(enum_type);
-	if(!base_input) _error_("could not find input with enum " << EnumToStringx(enum_type));
-	base_input->Extrude(start);
-
-	/*Stop if there is only one layer of element*/
-	if(start==-1 && this->IsOnSurface()) return;
-	if(start==+1 && this->IsOnBase())    return;
-
-	/*Step 2: this input has been extruded for this element, now follow the upper element*/
-	Penta* penta=this;
-	for(;;){
-		/*get upper/lower Penta*/
-		if(start==-1) penta=penta->GetUpperPenta();
-		else          penta=penta->GetLowerPenta();
-		_assert_(penta->Id()!=this->id);
-
-		/*Add input of the basal element to penta->inputs*/
-		Input* copy=(Input*)base_input->copy();
-		penta->inputs->AddInput((Input*)copy);
-
-		/*Stop if we have reached the surface/base*/
-		if(start==-1 && penta->IsOnSurface()) break;
-		if(start==+1 && penta->IsOnBase())    break;
+	/*Get original input*/
+	DatasetInput2* dinput = this->inputs2->GetDatasetInput2(enum_type);
+
+	int lidlist[NUMVERTICES];
+	this->GetVerticesLidList(&lidlist[0]);
+
+	for(int id=0;id<dinput->GetNumIds();id++){
+
+		PentaInput2* pentainput = dinput->GetPentaInputByOffset(id);
+		pentainput->Serve(NUMVERTICES,&lidlist[0]);
+
+		if(pentainput->GetInterpolation()==P1Enum){
+
+			/*Extrude values first*/
+			IssmDouble extrudedvalues[NUMVERTICES];
+			this->GetInputListOnVertices(&extrudedvalues[0],pentainput,0.);
+
+			if(start==-1){
+				for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues[i+NUMVERTICES2D]=extrudedvalues[i];
+			}
+			else{
+				for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues[i]=extrudedvalues[i+NUMVERTICES2D];
+			}
+
+			/*Propagate to other Pentas*/
+			Penta* penta=this;
+			for(;;){
+
+				/*Add input of the basal element to penta->inputs*/
+				int vertexlids[NUMVERTICES];
+				penta->GetVerticesLidList(&vertexlids[0]);
+				pentainput->SetInput(P1Enum,NUMVERTICES,&vertexlids[0],&extrudedvalues[0]);
+
+				/*Stop if we have reached the surface/base*/
+				if(start==-1 && penta->IsOnSurface()) break;
+				if(start==+1 && penta->IsOnBase())    break;
+
+				/*get upper/lower Penta*/
+				if(start==-1) penta=penta->GetUpperPenta();
+				else          penta=penta->GetLowerPenta();
+				_assert_(penta->Id()!=this->id);
+			}
+		}
+		else{
+			_error_("not implemented yet");
+		}
+	}
+}
+/*}}}*/
+void       Penta::ControlInputExtrude(int enum_type,int start){/*{{{*/
+
+	_assert_(start==-1 || start==+1);
+	_assert_(this->inputs2);
+
+	/*Are we on the the boundary we want to be?*/
+	if(start==-1 && !IsOnBase())    return;
+	if(start==+1 && !IsOnSurface()) return;
+
+	/*Get original input*/
+	ElementInput2* input  = this->inputs2->GetControlInput2Data(enum_type,"value");
+	if(input->ObjectEnum()!=PentaInput2Enum) _error_("not supported yet");
+	PentaInput2* pentainput = xDynamicCast<PentaInput2*>(input);
+	ElementInput2* input2 = this->inputs2->GetControlInput2Data(enum_type,"savedvalues");
+	if(input->ObjectEnum()!=PentaInput2Enum) _error_("not supported yet");
+	PentaInput2* pentainput2= xDynamicCast<PentaInput2*>(input2);
+	/*FIXME: this should not be necessary*/
+	ElementInput2* input3 = this->inputs2->GetControlInput2Data(enum_type,"gradient");
+	if(input->ObjectEnum()!=PentaInput2Enum) _error_("not supported yet");
+	PentaInput2* pentainput3= xDynamicCast<PentaInput2*>(input3);
+
+	int lidlist[NUMVERTICES];
+	this->GetVerticesLidList(&lidlist[0]);
+	pentainput->Serve(NUMVERTICES,&lidlist[0]);
+	pentainput2->Serve(NUMVERTICES,&lidlist[0]);
+	pentainput3->Serve(NUMVERTICES,&lidlist[0]);
+
+	if(pentainput->GetInterpolation()==P1Enum){
+
+		/*Extrude values first*/
+		IssmDouble extrudedvalues[NUMVERTICES];
+		IssmDouble extrudedvalues2[NUMVERTICES];
+		IssmDouble extrudedvalues3[NUMVERTICES];
+
+		this->GetInputListOnVertices(&extrudedvalues[0],pentainput,0.);
+		this->GetInputListOnVertices(&extrudedvalues2[0],pentainput2,0.);
+		this->GetInputListOnVertices(&extrudedvalues3[0],pentainput3,0.);
+
+		if(start==-1){
+			for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues[i+NUMVERTICES2D]=extrudedvalues[i];
+			for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues2[i+NUMVERTICES2D]=extrudedvalues2[i];
+			for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues3[i+NUMVERTICES2D]=extrudedvalues3[i]/2.; /*FIXME: this is just for NR*/
+			for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues3[i]=extrudedvalues3[i]/2.; /*FIXME: this is just for NR*/
+		}
+		else{
+			for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues[i]=extrudedvalues[i+NUMVERTICES2D];
+			for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues2[i]=extrudedvalues2[i+NUMVERTICES2D];
+		}
+
+		/*Propagate to other Pentas*/
+		Penta* penta=this;
+		for(;;){
+
+			if(penta->IsOnSurface() && start==-1){ /*FIXME: this is just for NR*/
+				for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues3[i+NUMVERTICES2D]=0.;
+			}
+
+			/*Add input of the basal element to penta->inputs*/
+			int vertexlids[NUMVERTICES];
+			penta->GetVerticesLidList(&vertexlids[0]);
+			pentainput->SetInput(P1Enum,NUMVERTICES,&vertexlids[0],&extrudedvalues[0]);
+			pentainput2->SetInput(P1Enum,NUMVERTICES,&vertexlids[0],&extrudedvalues2[0]);
+			if(start==-1 && !penta->IsOnBase()){
+				pentainput3->SetInput(P1Enum,NUMVERTICES,&vertexlids[0],&extrudedvalues3[0]);
+			}
+
+			/*Stop if we have reached the surface/base*/
+			if(start==-1 && penta->IsOnSurface()) break;
+			if(start==+1 && penta->IsOnBase())    break;
+
+			/*get upper/lower Penta*/
+			if(start==-1) penta=penta->GetUpperPenta();
+			else          penta=penta->GetLowerPenta();
+			_assert_(penta->Id()!=this->id);
+		}
+	}
+	else{
+		_error_("not implemented yet");
+	}
+}
+/*}}}*/
+void       Penta::InputExtrude(int enum_type,int start){/*{{{*/
+
+	_assert_(start==-1 || start==+1);
+	_assert_(this->inputs2);
+
+	/*Are we on the the boundary we want to be?*/
+	if(start==-1 && !IsOnBase())    return;
+	if(start==+1 && !IsOnSurface()) return;
+
+
+	/*Get original input*/
+	Input2* input = this->GetInput2(enum_type);
+	if(input->ObjectEnum()!=PentaInput2Enum) _error_("not supported yet");
+	PentaInput2* pentainput = xDynamicCast<PentaInput2*>(input);
+
+	if(pentainput->GetInterpolation()==P1Enum || pentainput->GetInterpolation()==P1DGEnum){
+		/*Extrude values first*/
+		IssmDouble extrudedvalues[NUMVERTICES];
+
+		Element::GetInputListOnVertices(&extrudedvalues[0],enum_type);
+		if(start==-1){
+			for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues[i+NUMVERTICES2D]=extrudedvalues[i];
+		}
+		else{
+			for(int i=0;i<NUMVERTICES2D;i++) extrudedvalues[i]=extrudedvalues[i+NUMVERTICES2D];
+		}
+
+		/*Propagate to other Pentas*/
+		Penta* penta=this;
+		for(;;){
+
+			/*Add input of the basal element to penta->inputs*/
+			penta->AddInput2(enum_type,&extrudedvalues[0],pentainput->GetInterpolation());
+
+			/*Stop if we have reached the surface/base*/
+			if(start==-1 && penta->IsOnSurface()) break;
+			if(start==+1 && penta->IsOnBase())    break;
+
+			/*get upper/lower Penta*/
+			if(start==-1) penta=penta->GetUpperPenta();
+			else          penta=penta->GetLowerPenta();
+			_assert_(penta->Id()!=this->id);
+		}
+	}
+	else{
+		_error_("interpolation "<<EnumToStringx(pentainput->GetInterpolation())<<" not implemented yet (while trying to extrude "<<EnumToStringx(enum_type)<<")");
 	}
 }
@@ -2071,102 +2521,4 @@
 		penta_vertex_ids[i]=iomodel->elements[NUMVERTICES*index+i]; //ids for vertices are in the elements array from Matlab
 	}
-
-	/*Control Inputs*/
-	if (control_analysis){
-		iomodel->FindConstant(&controls,NULL,"md.inversion.control_parameters");
-		for(i=0;i<num_control_type;i++){
-			_assert_(controls[i]);
-			int control = StringToEnumx(controls[i]);
-			switch(control){
-				case BalancethicknessThickeningRateEnum:
-					if (iomodel->Data("md.balancethickness.thickening_rate")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.balancethickness.thickening_rate")[penta_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(BalancethicknessThickeningRateEnum,PentaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case VxEnum:
-					if (iomodel->Data("md.initialization.vx")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.initialization.vx")[penta_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(VxEnum,PentaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case VyEnum:
-					if (iomodel->Data("md.initialization.vy")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.initialization.vy")[penta_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(VyEnum,PentaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case FrictionCoefficientEnum:
-					if (iomodel->Data("md.friction.coefficient")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.friction.coefficient")[penta_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(FrictionCoefficientEnum,PentaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case MaterialsRheologyBbarEnum:
-					if(iomodel->Data("md.materials.rheology_B")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.materials.rheology_B")[penta_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(MaterialsRheologyBEnum,PentaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case DamageDbarEnum:
-					if(iomodel->Data("md.damage.D")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.damage.D")[penta_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(penta_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(DamageDEnum,PentaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				default:
-					_error_("Control " << EnumToStringx(control) << " not implemented yet");
-			}
-		}
-		for(i=0;i<num_control_type;i++) xDelete<char>(controls[i]);
-		xDelete<char*>(controls);
-	}
-
-	/*Need to know the type of approximation for this element*/
-	if(iomodel->Data("md.flowequation.element_equation")){
-		this->inputs->AddInput(new IntInput(ApproximationEnum,IoCodeToEnumElementEquation(reCast<int>(iomodel->Data("md.flowequation.element_equation")[index]))));
-	}
-
-	/*DatasetInputs*/
-	if(control_analysis && iomodel->Data("md.inversion.cost_functions_coefficients")) {
-
-		/*Generate cost functions associated with the iomodel*/
-		char**	cost_functions			= NULL;
-		int*		cost_functions_enums = NULL;
-		int		num_cost_functions;
-
-		iomodel->FindConstant(&num_cost_functions,"md.inversion.num_cost_functions");
-		iomodel->FindConstant(&cost_functions,&num_cost_functions,"md.inversion.cost_functions");
-		if(num_cost_functions<1) _error_("No cost functions found");
-		cost_functions_enums=xNew<int>(num_cost_functions);
-		for(j=0;j<num_cost_functions;j++){ cost_functions_enums[j]=StringToEnumx(cost_functions[j]); }
-
-		/*Create inputs and add to DataSetInput*/
-		DatasetInput* datasetinput=new DatasetInput(InversionCostFunctionsCoefficientsEnum);
-		for(i=0;i<num_responses;i++){
-			for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.inversion.cost_functions_coefficients")[(penta_vertex_ids[j]-1)*num_responses+i];
-			datasetinput->AddInput(new PentaInput(InversionCostFunctionsCoefficientsEnum,nodeinputs,P1Enum),cost_functions_enums[i]);
-		}
-
-		/*Add datasetinput to element inputs*/
-		this->inputs->AddInput(datasetinput);
-
-		/*Free resources*/
-		for(int j=0;j<num_cost_functions;j++) xDelete<char>(cost_functions[j]);
-		xDelete<char*>(cost_functions);
-		xDelete<int>(cost_functions_enums);
-	}
 }
 /*}}}*/
@@ -2191,5 +2543,5 @@
 
 	/*Add input to the element: */
-	this->inputs->AddInput(new PentaInput(enum_type,values,this->element_type));
+	this->AddInput2(enum_type,values,this->element_type);
 
 	/*Free ressources:*/
@@ -2225,5 +2577,5 @@
 	for(;;){
 		/*Add input to the element: */
-		penta->inputs->AddInput(new PentaInput(enum_type,values,P1Enum));
+		penta->AddInput2(enum_type,values,P1Enum);
 
 		/*Stop if we have reached the surface*/
@@ -2243,4 +2595,7 @@
 	int        *doflist        = NULL;
 	IssmDouble  values[numdof];
+	int         lidlist[NUMVERTICES];
+
+	GetVerticesLidList(&lidlist[0]);
 
 	/*Check that name is an element input*/
@@ -2253,5 +2608,5 @@
 			}
 			/*update input*/
-			this->inputs->AddInput(new PentaInput(name,values,P1Enum));
+			inputs2->SetPentaInput(name,P1Enum,NUMVERTICES,lidlist,values);
 			return;
 
@@ -2261,5 +2616,5 @@
 			}
 			/*update input*/
-			this->inputs->AddInput(new PentaInput(name,values,P1Enum));
+			inputs2->SetPentaInput(name,P1Enum,NUMVERTICES,lidlist,values);
 			return;
 
@@ -2269,5 +2624,5 @@
 			}
 			/*update input*/
-			this->inputs->AddInput(new PentaInput(name,values,P1Enum));
+			inputs2->SetPentaInput(name,P1Enum,NUMVERTICES,lidlist,values);
 			return;
 
@@ -2283,5 +2638,5 @@
 			}
 			/*Add input to the element: */
-			this->inputs->AddInput(new PentaInput(name,values,P1Enum));
+			inputs2->SetPentaInput(name,P1Enum,NUMVERTICES,lidlist,values);
 
 			/*Free ressources:*/
@@ -2296,5 +2651,5 @@
 			}
 			/*Add input to the element: */
-			this->inputs->AddInput(new PentaInput(name,values,P1Enum));
+			inputs2->SetPentaInput(name,P1Enum,NUMVERTICES,lidlist,values);
 
 			/*Free ressources:*/
@@ -2314,5 +2669,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&ls[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&ls[0],MaskIceLevelsetEnum);
 
 	/* If only one vertex has ice, there is an ice front here */
@@ -2340,40 +2695,4 @@
 }
 /*}}}*/
-bool       Penta::IsOnBase(void){/*{{{*/
-
-	IssmDouble values[NUMVERTICES];
-	IssmDouble sum;
-
-	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
-	sum = values[0]+values[1]+values[2]+values[3]+values[4]+values[5];
-	_assert_(sum==0. || sum==3.);
-
-	if(sum==3){
-		return true;
-	}
-	else{
-		return false;
-	}
-}
-/*}}}*/
-bool       Penta::IsOnSurface(void){/*{{{*/
-
-	IssmDouble values[NUMVERTICES];
-	IssmDouble sum;
-
-	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
-	sum = values[0]+values[1]+values[2]+values[3]+values[4]+values[5];
-	_assert_(sum==0. || sum==3.);
-
-	if(sum==3){
-		return true;
-	}
-	else{
-		return false;
-	}
-}
-/*}}}*/
 bool       Penta::IsZeroLevelset(int levelset_enum){/*{{{*/
 
@@ -2382,5 +2701,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&ls[0],levelset_enum);
+	Element::GetInputListOnVertices(&ls[0],levelset_enum);
 
 	/*If the level set has always same sign, there is no ice front here*/
@@ -2444,8 +2763,4 @@
 	delete tria->material; delete tria;
 
-	/*Delete Vx and Vy averaged*/
-	this->inputs->DeleteInput(VxAverageEnum);
-	this->inputs->DeleteInput(VyAverageEnum);
-
 	/*clean up and return*/
 	return mass_flux;
@@ -2467,8 +2782,4 @@
 	delete tria->material; delete tria;
 
-	/*Delete Vx and Vy averaged*/
-	this->inputs->DeleteInput(VxAverageEnum);
-	this->inputs->DeleteInput(VyAverageEnum);
-
 	/*clean up and return*/
 	return mass_flux;
@@ -2613,9 +2924,8 @@
 	int found=0;
 	IssmDouble value;
-	Input* data=NULL;
 	GaussPenta* gauss=NULL;
 
 	/*First, serarch the input: */
-	data=inputs->GetInput(natureofdataenum);
+	Input2* data=this->GetInput2(natureofdataenum); 
 
 	/*figure out if we have the vertex id: */
@@ -2742,7 +3052,7 @@
 	rho_ice=FindParam(MaterialsRhoIceEnum);
 	density=rho_ice/rho_water;
-	GetInputListOnVertices(&h[0],ThicknessEnum);
-	GetInputListOnVertices(&r[0],BedEnum);
-	GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&h[0],ThicknessEnum);
+	Element::GetInputListOnVertices(&r[0],BedEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*go through vertices, and figure out which ones are on the ice sheet, and want to unground: */
@@ -2772,5 +3082,5 @@
 			if(this->element_type==MINIcondensedEnum){
 				int approximation;
-				inputs->GetInputValue(&approximation,ApproximationEnum);
+				this->GetInput2Value(&approximation,ApproximationEnum);
 				if(approximation==HOFSApproximationEnum || approximation==SSAFSApproximationEnum){
 					//Do nothing, condensation already done in PVectorCoupling
@@ -2796,5 +3106,5 @@
 		if(analysis_type==StressbalanceAnalysisEnum){
 			int approximation;
-			inputs->GetInputValue(&approximation,ApproximationEnum);
+			this->GetInput2Value(&approximation,ApproximationEnum);
 			if(approximation==HOFSApproximationEnum || approximation==SSAFSApproximationEnum){
 				//Do nothing condensatino already done for Stokes part
@@ -2829,5 +3139,5 @@
 
 	/*For FS only: we want the CS to be tangential to the bedrock*/
-	inputs->GetInputValue(&approximation,ApproximationEnum);
+	this->GetInput2Value(&approximation,ApproximationEnum);
 	if(!IsOnBase() || (approximation!=FSApproximationEnum && approximation!=SSAFSApproximationEnum &&  approximation!=HOFSApproximationEnum)) return;
 
@@ -2836,7 +3146,7 @@
 
 	/*Get inputs*/
-	Input* slopex_input=inputs->GetInput(BedSlopeXEnum); _assert_(slopex_input);
-	Input* slopey_input=inputs->GetInput(BedSlopeYEnum); _assert_(slopey_input);
-	Input* groundedicelevelset_input=inputs->GetInput(MaskGroundediceLevelsetEnum); _assert_(groundedicelevelset_input);
+	Input2* slopex_input=this->GetInput2(BedSlopeXEnum); _assert_(slopex_input);
+	Input2* slopey_input=this->GetInput2(BedSlopeYEnum); _assert_(slopey_input);
+	Input2* groundedicelevelset_input=this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(groundedicelevelset_input);
 
 	/*Loop over basal nodes and update their CS*/
@@ -2915,8 +3225,8 @@
 
 	/*Get inputs*/
-	Input* bed_input = this->GetInput(BedEnum);                     _assert_(bed_input);
-	Input* qsg_input = this->GetInput(FrontalForcingsSubglacialDischargeEnum);		 _assert_(qsg_input);
-	Input* TF_input  = this->GetInput(FrontalForcingsThermalForcingEnum);          _assert_(TF_input);
-	GetInputListOnVertices(&basinid[0],FrontalForcingsBasinIdEnum);
+	Input2* bed_input = this->GetInput2(BedEnum);                     _assert_(bed_input);
+	Input2* qsg_input = this->GetInput2(FrontalForcingsSubglacialDischargeEnum);		 _assert_(qsg_input);
+	Input2* TF_input  = this->GetInput2(FrontalForcingsThermalForcingEnum);          _assert_(TF_input);
+	Element::GetInputListOnVertices(&basinid[0],FrontalForcingsBasinIdEnum);
 
 	this->FindParam(&yts, ConstantsYtsEnum);
@@ -2950,5 +3260,5 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new PentaInput(CalvingMeltingrateEnum,&meltrates[0],P1Enum));
+	this->AddInput2(CalvingMeltingrateEnum,&meltrates[0],P1Enum);
 
 	this->InputExtrude(CalvingMeltingrateEnum,-1);
@@ -2957,4 +3267,24 @@
 	xDelete<IssmDouble>(basin_icefront_area);
 	delete gauss;
+}
+/*}}}*/
+void       Penta::SetElementInput(int enum_in,IssmDouble value){/*{{{*/
+
+	this->SetElementInput(this->inputs2,enum_in,value);
+
+}
+/*}}}*/
+void       Penta::SetElementInput(Inputs2* inputs2,int enum_in,IssmDouble value){/*{{{*/
+
+	_assert_(inputs2);
+	inputs2->SetPentaInput(enum_in,P0Enum,this->lid,value);
+
+}
+/*}}}*/
+void       Penta::SetElementInput(Inputs2* inputs2,int numindices,int* indices,IssmDouble* values,int enum_in){/*{{{*/
+
+	_assert_(inputs2);
+	inputs2->SetPentaInput(enum_in,P1Enum,numindices,indices,values);
+
 }
 /*}}}*/
@@ -2985,11 +3315,12 @@
 		values[i]=vector[vertexpidlist[i]];
 	}
-	Input* new_input = new PentaInput(control_enum,values,P1Enum);
-	Input* input=(Input*)this->inputs->GetInput(control_enum);   _assert_(input);
+	_error_("not implemented");
+	//Input* new_input = new PentaInput(control_enum,values,P1Enum);
+	Input2* input=(Input2*)this->GetInput2(control_enum);   _assert_(input);
 	if(input->ObjectEnum()!=ControlInputEnum){
 		_error_("input " << EnumToStringx(control_enum) << " is not a ControlInput");
 	}
 
-	((ControlInput*)input)->SetInput(new_input);
+	//((ControlInput*)input)->SetInput(new_input);
 
 	if(control_init==MaterialsRheologyBbarEnum){
@@ -3004,5 +3335,6 @@
 
 	IssmDouble  values[NUMVERTICES];
-	int         vertexpidlist[NUMVERTICES],control_init;
+	int         lidlist[NUMVERTICES];
+	int         idlist[NUMVERTICES],control_init;
 
 	/*Specific case for depth averaged quantities*/
@@ -3017,27 +3349,40 @@
 	}
 
+	/*Get Domain type*/
+	int domaintype;
+	parameters->FindParam(&domaintype,DomainTypeEnum);
+
+	/*Specific case for depth averaged quantities*/
+	if(domaintype==Domain2DverticalEnum){
+		if(control_enum==MaterialsRheologyBbarEnum){
+			control_enum=MaterialsRheologyBEnum;
+			if(!IsOnBase()) return;
+		}
+		if(control_enum==DamageDbarEnum){
+			control_enum=DamageDEnum;
+			if(!IsOnBase()) return;
+		}
+	}
+
 	/*Get out if this is not an element input*/
 	if(!IsInputEnum(control_enum)) return;
 
-	/*Prepare index list*/
-	GradientIndexing(&vertexpidlist[0],control_index);
+	/*prepare index list*/
+	this->GetVerticesLidList(&lidlist[0]);
+	GradientIndexing(&idlist[0],control_index);
 
 	/*Get values on vertices*/
 	for(int i=0;i<NUMVERTICES;i++){
-		values[i]=vector[vertexpidlist[i]];
-	}
-	Input* new_input = new PentaInput(control_enum,values,P1Enum);
-	Input* input=(Input*)this->inputs->GetInput(control_enum);   _assert_(input);
-	if(input->ObjectEnum()!=ControlInputEnum){
-		_error_("input " << EnumToStringx(control_enum) << " is not a ControlInput");
-	}
-
-	((ControlInput*)input)->SetInput(new_input);
-
+		values[i]=vector[idlist[i]];
+	}
+
+	/*Set Input*/
+	ElementInput2* input=this->inputs2->GetControlInput2Data(control_enum,"value");   _assert_(input);
+	input->SetInput(P1Enum,NUMVERTICES,&lidlist[0],&values[0]);
 	if(control_init==MaterialsRheologyBbarEnum){
-		this->InputExtrude(control_enum,-1);
+		this->ControlInputExtrude(control_enum,-1);
 	}
 	if(control_init==DamageDbarEnum){
-		this->InputExtrude(control_enum,-1);
+		this->ControlInputExtrude(control_enum,-1);
 	}
 }
@@ -3080,24 +3425,10 @@
 			_error_("not supported yet");
 	}
-	if(this->inputs->GetInput(VxEnum)) this->InputDepthAverageAtBase(VxEnum,VxAverageEnum);
-	if(this->inputs->GetInput(VyEnum)) this->InputDepthAverageAtBase(VyEnum,VyAverageEnum);
-	if(this->inputs->GetInput(CalvingratexEnum)) this->InputDepthAverageAtBase(CalvingratexEnum,CalvingratexAverageEnum);
-	if(this->inputs->GetInput(CalvingrateyEnum)) this->InputDepthAverageAtBase(CalvingrateyEnum,CalvingrateyAverageEnum);
+	if(this->GetInput2(VxEnum)) this->InputDepthAverageAtBase(VxEnum,VxAverageEnum);
+	if(this->GetInput2(VyEnum)) this->InputDepthAverageAtBase(VyEnum,VyAverageEnum);
+	if(this->GetInput2(CalvingratexEnum)) this->InputDepthAverageAtBase(CalvingratexEnum,CalvingratexAverageEnum);
+	if(this->GetInput2(CalvingrateyEnum)) this->InputDepthAverageAtBase(CalvingrateyEnum,CalvingrateyAverageEnum);
 
 	Tria* tria=(Tria*)SpawnTria(0,1,2);
-	switch(this->material->ObjectEnum()){
-		case MaticeEnum:
-			this->inputs->DeleteInput(MaterialsRheologyBbarEnum);
-			this->inputs->DeleteInput(DamageDbarEnum);
-			break;
-		case MatestarEnum:
-			break;
-		default:
-			_error_("not supported yet");
-	}
-	this->inputs->DeleteInput(VxAverageEnum);
-	this->inputs->DeleteInput(VyAverageEnum);
-	this->inputs->DeleteInput(CalvingratexAverageEnum);
-	this->inputs->DeleteInput(CalvingrateyAverageEnum);
 
 	return tria;
@@ -3123,8 +3454,17 @@
 	Tria* tria=new Tria();
 	tria->id=this->id;
-	tria->inputs=(Inputs*)this->inputs->SpawnTriaInputs(index1,index2,index3);
+	tria->sid=this->sid;
+	tria->lid=this->lid;
 	tria->parameters=this->parameters;
+	tria->inputs2=this->inputs2;
 	tria->element_type=P1Enum; //Only P1 CG for now (TO BE CHANGED)
 	this->SpawnTriaHook(xDynamicCast<ElementHook*>(tria),index1,index2,index3);
+
+	if(index1==0 && index2==1 && index3==2){
+		tria->iscollapsed = 1;
+	}
+	else if(index1==3 && index2==4 && index3==5){
+		tria->iscollapsed = 2;
+	}
 
 	/*Spawn material*/
@@ -3197,7 +3537,7 @@
 
 	/*Retrieve all inputs we will need*/
-	Input* vx_input=inputs->GetInput(VxEnum);                                  _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);                                  _assert_(vy_input);
-	Input* vz_input=inputs->GetInput(VzEnum);												_assert_(vz_input);
+	Input2* vx_input=this->GetInput2(VxEnum);                                  _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);                                  _assert_(vy_input);
+	Input2* vz_input=this->GetInput2(VzEnum);												_assert_(vz_input);
 
 	/* Start looping on the number of vertices: */
@@ -3222,5 +3562,5 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new PentaInput(StrainRateparallelEnum,&strainparallel[0],P1Enum));
+	this->AddInput2(StrainRateparallelEnum,&strainparallel[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -3244,7 +3584,7 @@
 
 	/*Retrieve all inputs we will need*/
-	Input* vx_input=inputs->GetInput(VxEnum);                                  _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);                                  _assert_(vy_input);
-	Input* vz_input=inputs->GetInput(VzEnum);												_assert_(vz_input);
+	Input2* vx_input=this->GetInput2(VxEnum);                                  _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);                                  _assert_(vy_input);
+	Input2* vz_input=this->GetInput2(VzEnum);												_assert_(vz_input);
 
 	/* Start looping on the number of vertices: */
@@ -3269,5 +3609,5 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new PentaInput(StrainRateperpendicularEnum,&strainperpendicular[0],P1Enum));
+	this->AddInput2(StrainRateperpendicularEnum,&strainperpendicular[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -3301,13 +3641,13 @@
 
 		/*Retrieve all inputs we will need*/
-		Input* vx_input=inputs->GetInput(VxEnum);                                  _assert_(vx_input);
-		Input* vy_input=inputs->GetInput(VyEnum);                                  _assert_(vy_input);
-		Input* vel_input=inputs->GetInput(VelEnum);                                _assert_(vel_input);
-		Input* pressure_input=inputs->GetInput(PressureEnum);                      _assert_(pressure_input);
-		Input* deviaxx_input=inputs->GetInput(DeviatoricStressxxEnum);             _assert_(deviaxx_input);
-		Input* deviaxy_input=inputs->GetInput(DeviatoricStressxyEnum);             _assert_(deviaxy_input);
-		Input* deviayy_input=inputs->GetInput(DeviatoricStressyyEnum);             _assert_(deviayy_input);
-		Input* surface_input=inputs->GetInput(SurfaceEnum);								_assert_(surface_input);
-		Input* thickness_input=inputs->GetInput(ThicknessEnum);							_assert_(thickness_input);
+		Input2* vx_input=this->GetInput2(VxEnum);                                  _assert_(vx_input);
+		Input2* vy_input=this->GetInput2(VyEnum);                                  _assert_(vy_input);
+		Input2* vel_input=this->GetInput2(VelEnum);                                _assert_(vel_input);
+		Input2* pressure_input=this->GetInput2(PressureEnum);                      _assert_(pressure_input);
+		Input2* deviaxx_input=this->GetInput2(DeviatoricStressxxEnum);             _assert_(deviaxx_input);
+		Input2* deviaxy_input=this->GetInput2(DeviatoricStressxyEnum);             _assert_(deviaxy_input);
+		Input2* deviayy_input=this->GetInput2(DeviatoricStressyyEnum);             _assert_(deviayy_input);
+		Input2* surface_input=this->GetInput2(SurfaceEnum);								_assert_(surface_input);
+		Input2* thickness_input=this->GetInput2(ThicknessEnum);							_assert_(thickness_input);
 
 		/* Start looping on the number of 2D vertices: */
@@ -3349,5 +3689,5 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new PentaInput(StressIntensityFactorEnum,&ki[0],P1Enum));
+	this->AddInput2(StressIntensityFactorEnum,&ki[0],P1Enum);
 	this->InputExtrude(StressIntensityFactorEnum,-1);
 }
@@ -3360,5 +3700,5 @@
 
 	/*retrieve inputs :*/
-	inputs->GetInputValue(&approximation,ApproximationEnum);
+	this->GetInput2Value(&approximation,ApproximationEnum);
 
 	/*If on water, return 0: */
@@ -3391,8 +3731,6 @@
 IssmDouble Penta::TimeAdapt(void){/*{{{*/
 
-	int    i;
-	IssmDouble C,dx,dy,dz,dt;
-	IssmDouble maxabsvx,maxabsvy,maxabsvz;
-	IssmDouble maxx,minx,maxy,miny,maxz,minz;
+	/*intermediary: */
+	IssmDouble C;
 	IssmDouble xyz_list[NUMVERTICES][3];
 
@@ -3401,32 +3739,35 @@
 
 	/*Get for Vx and Vy, the max of abs value: */
-	maxabsvx = this->inputs->MaxAbs(VxEnum);
-	maxabsvy = this->inputs->MaxAbs(VyEnum);
-	maxabsvz = this->inputs->MaxAbs(VzEnum);
+	Input2* vx_input = this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = this->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* vz_input = this->GetInput2(VzEnum); _assert_(vz_input);
+	IssmDouble maxabsvx = vx_input->GetInputMaxAbs();
+	IssmDouble maxabsvy = vy_input->GetInputMaxAbs();
+	IssmDouble maxabsvz = vz_input->GetInputMaxAbs();
 
 	/* Get node coordinates and dof list: */
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
 
-	minx=xyz_list[0][0];
-	maxx=xyz_list[0][0];
-	miny=xyz_list[0][1];
-	maxy=xyz_list[0][1];
-	minz=xyz_list[0][2];
-	maxz=xyz_list[0][2];
-
-	for(i=1;i<NUMVERTICES;i++){
-		if (xyz_list[i][0]<minx)minx=xyz_list[i][0];
-		if (xyz_list[i][0]>maxx)maxx=xyz_list[i][0];
-		if (xyz_list[i][1]<miny)miny=xyz_list[i][1];
-		if (xyz_list[i][1]>maxy)maxy=xyz_list[i][1];
-		if (xyz_list[i][2]<minz)minz=xyz_list[i][2];
-		if (xyz_list[i][2]>maxz)maxz=xyz_list[i][2];
-	}
-	dx=maxx-minx;
-	dy=maxy-miny;
-	dz=maxz-minz;
+	IssmDouble minx=xyz_list[0][0];
+	IssmDouble maxx=xyz_list[0][0];
+	IssmDouble miny=xyz_list[0][1];
+	IssmDouble maxy=xyz_list[0][1];
+	IssmDouble minz=xyz_list[0][2];
+	IssmDouble maxz=xyz_list[0][2];
+
+	for(int i=1;i<NUMVERTICES;i++){
+		if(xyz_list[i][0]<minx) minx=xyz_list[i][0];
+		if(xyz_list[i][0]>maxx) maxx=xyz_list[i][0];
+		if(xyz_list[i][1]<miny) miny=xyz_list[i][1];
+		if(xyz_list[i][1]>maxy) maxy=xyz_list[i][1];
+		if(xyz_list[i][2]<minz) minz=xyz_list[i][2];
+		if(xyz_list[i][2]>maxz) maxz=xyz_list[i][2];
+	}
+	IssmDouble dx=maxx-minx;
+	IssmDouble dy=maxy-miny;
+	IssmDouble dz=maxz-minz;
 
 	/*CFL criterion: */
-	dt=C/(maxabsvx/dx+maxabsvy/dy+maxabsvz/dz);
+	IssmDouble dt = C/(maxabsvx/dx+maxabsvy/dy+maxabsvz/dz);
 
 	return dt;
@@ -3450,5 +3791,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -3525,9 +3866,9 @@
 	IssmDouble calvingratex,calvingratey,thickness,Jdet;
 	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* calvingratex_input=NULL;
-	Input* calvingratey_input=NULL;
-	calvingratex_input=inputs->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-	calvingratey_input=inputs->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* calvingratex_input=NULL;
+	Input2* calvingratey_input=NULL;
+	calvingratex_input=this->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+	calvingratey_input=this->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
 
 	/*Start looping on Gaussian points*/
@@ -3544,8 +3885,8 @@
 	}
 
-	return flux;
-
 	/*Clean up and return*/
 	delete gauss;
+	return flux;
+
 }
 /*}}}*/
@@ -3568,5 +3909,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -3646,15 +3987,15 @@
 	IssmDouble calvingratex,calvingratey,vx,vy,vel,meltingrate,meltingratex,meltingratey,thickness,Jdet;
 	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* calvingratex_input=NULL;
-	Input* calvingratey_input=NULL;
-	Input* vx_input=NULL;
-	Input* vy_input=NULL;
-	Input* meltingrate_input=NULL;
-	calvingratex_input=inputs->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-	calvingratey_input=inputs->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
-	vx_input=inputs->GetInput(VxAverageEnum); _assert_(vx_input);
-	vy_input=inputs->GetInput(VyAverageEnum); _assert_(vy_input);
-	meltingrate_input=inputs->GetInput(CalvingMeltingrateEnum); _assert_(meltingrate_input);
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* calvingratex_input=NULL;
+	Input2* calvingratey_input=NULL;
+	Input2* vx_input=NULL;
+	Input2* vy_input=NULL;
+	Input2* meltingrate_input=NULL;
+	calvingratex_input=this->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+	calvingratey_input=this->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
+	vx_input=this->GetInput2(VxAverageEnum); _assert_(vx_input);
+	vy_input=this->GetInput2(VyAverageEnum); _assert_(vy_input);
+	meltingrate_input=this->GetInput2(CalvingMeltingrateEnum); _assert_(meltingrate_input);
 
 	/*Start looping on Gaussian points*/
@@ -3677,8 +4018,8 @@
 	}
 
-	return flux;
-
 	/*Clean up and return*/
 	delete gauss;
+	return flux;
+
 }
 /*}}}*/
@@ -3698,9 +4039,9 @@
 	/*Get material parameters :*/
 	rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* floatingmelt_input = this->GetInput(BasalforcingsFloatingiceMeltingRateEnum); _assert_(floatingmelt_input);
-	Input* gllevelset_input = this->GetInput(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
-	Input* scalefactor_input = NULL;
+	Input2* floatingmelt_input = this->GetInput2(BasalforcingsFloatingiceMeltingRateEnum); _assert_(floatingmelt_input); 
+	Input2* gllevelset_input = this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
+	Input2* scalefactor_input = NULL;
 	if(scaled==true){
-		scalefactor_input = this->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input); 
 	}
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
@@ -3743,9 +4084,9 @@
 	/*Get material parameters :*/
 	rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* groundedmelt_input = this->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(groundedmelt_input);
-	Input* gllevelset_input = this->GetInput(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
-	Input* scalefactor_input = NULL;
+	Input2* groundedmelt_input = this->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(groundedmelt_input);
+	Input2* gllevelset_input   = this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
+	Input2* scalefactor_input  = NULL;
 	if(scaled==true){
-		scalefactor_input = this->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input); 
 	}
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
@@ -3793,9 +4134,9 @@
 
 	/*Now get the average SMB over the element*/
-	Input* smb_input = inputs->GetInput(SmbMassBalanceEnum); _assert_(smb_input);
+	Input2* smb_input = this->GetInput2(SmbMassBalanceEnum); _assert_(smb_input);
 
 	smb_input->GetInputAverage(&smb);
 	if(scaled==true){
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);// average scalefactor on element
 	}
@@ -3809,5 +4150,5 @@
 }
 /*}}}*/
-void       Penta::Update(int index,IoModel* iomodel,int analysis_counter,int analysis_type,int finiteelement_type){ /*{{{*/
+void       Penta::Update(Inputs2* inputs2,int index,IoModel* iomodel,int analysis_counter,int analysis_type,int finiteelement_type){ /*{{{*/
 
 	/*Intermediaries*/
@@ -4188,7 +4529,4 @@
 	xDelete<int>(penta_node_ids);
 
-	/*Fill with IoModel*/
-	this->InputUpdateFromIoModel(index,iomodel);
-
 	/*Defaults if not provided in iomodel*/
 	switch(analysis_type){
@@ -4198,29 +4536,33 @@
 
 			if((IoCodeToEnumElementEquation(reCast<int>(iomodel->Data("md.flowequation.element_equation")[index])))==HOFSApproximationEnum){
+				int vertexlids[NUMVERTICES];
+				for(i=0;i<NUMVERTICES;i++) vertexlids[i]=iomodel->my_vertices_lids[penta_vertex_ids[i]-1];
 				/*Create VzHO and VzFS Enums*/
 				if(iomodel->Data("md.initialization.vz") && iomodel->Data("md.flowequation.borderFS")){
 					for(i=0;i<6;i++) nodeinputs[i]=iomodel->Data("md.initialization.vz")[penta_vertex_ids[i]-1]*iomodel->Data("md.flowequation.borderFS")[penta_vertex_ids[i]-1];
-					this->inputs->AddInput(new PentaInput(VzFSEnum,nodeinputs,P1Enum));
+					this->SetElementInput(inputs2,NUMVERTICES,vertexlids,nodeinputs,VzFSEnum);
 					for(i=0;i<6;i++) nodeinputs[i]=iomodel->Data("md.initialization.vz")[penta_vertex_ids[i]-1]*(1-iomodel->Data("md.flowequation.borderFS")[penta_vertex_ids[i]-1]);
-					this->inputs->AddInput(new PentaInput(VzHOEnum,nodeinputs,P1Enum));
+					this->SetElementInput(inputs2,NUMVERTICES,vertexlids,nodeinputs,VzHOEnum);
 				}
 				else{
 					for(i=0;i<6;i++)nodeinputs[i]=0;
-					this->inputs->AddInput(new PentaInput(VzFSEnum,nodeinputs,P1Enum));
-					this->inputs->AddInput(new PentaInput(VzHOEnum,nodeinputs,P1Enum));
+					this->SetElementInput(inputs2,NUMVERTICES,vertexlids,nodeinputs,VzFSEnum);
+					this->SetElementInput(inputs2,NUMVERTICES,vertexlids,nodeinputs,VzHOEnum);
 				}
 			}
 			if((IoCodeToEnumElementEquation(reCast<int>(iomodel->Data("md.flowequation.element_equation")[index])))==SSAFSApproximationEnum){
+				int vertexlids[NUMVERTICES];
+				for(i=0;i<NUMVERTICES;i++) vertexlids[i]=iomodel->my_vertices_lids[penta_vertex_ids[i]-1];
 				/*Create VzSSA and VzFS Enums*/
 				if(iomodel->Data("md.initialization.vz") && iomodel->Data("md.flowequation.borderFS")){
 					for(i=0;i<6;i++) nodeinputs[i]=iomodel->Data("md.initialization.vz")[penta_vertex_ids[i]-1]*iomodel->Data("md.flowequation.borderFS")[penta_vertex_ids[i]-1];
-					this->inputs->AddInput(new PentaInput(VzFSEnum,nodeinputs,P1Enum));
+					this->SetElementInput(inputs2,NUMVERTICES,vertexlids,nodeinputs,VzFSEnum);
 					for(i=0;i<6;i++) nodeinputs[i]=iomodel->Data("md.initialization.vz")[penta_vertex_ids[i]-1]*(1-iomodel->Data("md.flowequation.borderFS")[penta_vertex_ids[i]-1]);
-					this->inputs->AddInput(new PentaInput(VzSSAEnum,nodeinputs,P1Enum));
+					this->SetElementInput(inputs2,NUMVERTICES,vertexlids,nodeinputs,VzSSAEnum);
 				}
 				else{
 					for(i=0;i<6;i++)nodeinputs[i]=0;
-					this->inputs->AddInput(new PentaInput(VzFSEnum,nodeinputs,P1Enum));
-					this->inputs->AddInput(new PentaInput(VzSSAEnum,nodeinputs,P1Enum));
+					this->SetElementInput(inputs2,NUMVERTICES,vertexlids,nodeinputs,VzFSEnum);
+					this->SetElementInput(inputs2,NUMVERTICES,vertexlids,nodeinputs,VzSSAEnum);
 				}
 			}
@@ -4240,6 +4582,6 @@
 
 	this->parameters->FindParam(&extrusioninput,InputToExtrudeEnum);
-	Input* input = inputs->GetInput(extrusioninput);      _assert_(extrusioninput);
-	Input* onbase = inputs->GetInput(MeshVertexonbaseEnum); _assert_(onbase);
+	Input2* input = this->GetInput2(extrusioninput);      _assert_(extrusioninput);
+	Input2* onbase = this->GetInput2(MeshVertexonbaseEnum); _assert_(onbase);
 
 	GaussPenta* gauss=new GaussPenta();
@@ -4265,5 +4607,5 @@
 
 	this->parameters->FindParam(&extrusioninput,InputToExtrudeEnum);
-	Input* input = inputs->GetInput(extrusioninput); _assert_(extrusioninput);
+	Input2* input = this->GetInput2(extrusioninput); _assert_(extrusioninput);
 
 	GaussPenta* gauss=new GaussPenta();
@@ -4333,5 +4675,5 @@
 }
 /*}}}*/
-void       Penta::ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){/*{{{*/
+void       Penta::ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -4360,48 +4702,40 @@
 void       Penta::InputUpdateFromMatrixDakota(IssmDouble* matrix, int nrows, int ncols, int name, int type){/*{{{*/
 
-	int             i,t,row;
-	IssmDouble      time;
-	TransientInput *transientinput = NULL;
-	IssmDouble      values[6];
-	IssmDouble      value;
-
 	/*Check that name is an element input*/
 	if(!IsInputEnum(name)) _error_("Enum "<<EnumToStringx(name)<<" is not in IsInput");
+	TransientInput2* transientinput = inputs2->GetTransientInput(name);
 
 	switch(type){
 
 		case VertexEnum:
+
+			/*Get LID lists once for all*/
+			IssmDouble  values[NUMVERTICES];
+			int         lidlist[NUMVERTICES];
+			this->GetVerticesLidList(&lidlist[0]);
+
 			/*Create transient input: */
-			for(t=0;t<ncols;t++){ //ncols is the number of times
+			for(int t=0;t<ncols;t++){ //ncols is the number of times
 
 				/*create input values: */
-				for(i=0;i<6;i++){
-					row=this->vertices[i]->Sid();
+				for(int i=0;i<6;i++){
+					int row=this->vertices[i]->Sid();
 					values[i]=matrix[ncols*row+t];
 				}
 
 				/*time:*/
-				time=matrix[(nrows-1)*ncols+t];
-
-				if(t==0) transientinput=new TransientInput(name);
-				transientinput->AddTimeInput(new PentaInput(name,values,P1Enum),time);
-				transientinput->Configure(parameters);
+				IssmDouble time=matrix[(nrows-1)*ncols+t];
+
+				transientinput->AddPentaTimeInput(t,NUMVERTICES,&lidlist[0],&values[0],P1Enum);
 			}
-			this->inputs->AddInput(transientinput);
 			break;
 
 		case ElementEnum:
 			/*Get value for the element: */
-			for(t=0;t<ncols;t++){ //ncols is the number of times
-				value=matrix[ncols*(this->Sid())+t];
-
-				/*time:*/
-				time=matrix[(nrows-1)*ncols+t];
-
-				if(t==0) transientinput=new TransientInput(name);
-				transientinput->AddTimeInput(new PentaInput(name,&value,P0Enum),time);
-				transientinput->Configure(parameters);
+			for(int t=0;t<ncols;t++){ //ncols is the number of times
+				IssmDouble value=matrix[ncols*(this->Sid())+t];
+				IssmDouble time=matrix[(nrows-1)*ncols+t];
+				transientinput->AddPentaTimeInput(t,1,&(this->lid),&value,P0Enum);
 			}
-			this->inputs->AddInput(transientinput);
 			break;
 
@@ -4442,8 +4776,8 @@
 
 					/*retrieve inputs: */
-					GetInputListOnVertices(&thickness_init[0],ThicknessEnum);
-					GetInputListOnVertices(&hydrostatic_ratio[0],GeometryHydrostaticRatioEnum);
-					GetInputListOnVertices(&bed[0],BaseEnum);
-					GetInputListOnVertices(&surface[0],SurfaceEnum);
+					Element::GetInputListOnVertices(&thickness_init[0],ThicknessEnum);
+					Element::GetInputListOnVertices(&hydrostatic_ratio[0],GeometryHydrostaticRatioEnum);
+					Element::GetInputListOnVertices(&bed[0],BaseEnum);
+					Element::GetInputListOnVertices(&surface[0],SurfaceEnum);
 
 					/*build new thickness: */
@@ -4496,11 +4830,11 @@
 
 					/*Add new inputs: */
-					this->inputs->AddInput(new PentaInput(ThicknessEnum,thickness,P1Enum));
-					this->inputs->AddInput(new PentaInput(BaseEnum,bed,P1Enum));
-					this->inputs->AddInput(new PentaInput(SurfaceEnum,surface,P1Enum));
+					this->AddInput2(ThicknessEnum,thickness,P1Enum);
+					this->AddInput2(BaseEnum,bed,P1Enum);
+					this->AddInput2(SurfaceEnum,surface,P1Enum);
 					break;
 
 				default:
-					this->inputs->AddInput(new PentaInput(name,values,P1Enum));
+					this->AddInput2(name,values,P1Enum);
 			}
 			break;
@@ -4510,5 +4844,5 @@
 			/*Get value for the element: */
 			value=vector[this->Sid()]; //careful, vector of values here is not parallel distributed, but serial distributed (from a serial Dakota core!)
-			this->inputs->AddInput(new PentaInput(name,&value,P0Enum));
+			this->AddInput2(name,&value,P0Enum);
 			break;
 
Index: /issm/trunk/src/c/classes/Elements/Penta.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Penta.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Penta.h	(revision 24686)
@@ -1,3 +1,3 @@
-/*! \file Penta.h 
+/*! \file Penta.h
  *  \brief: header file for penta object
  */
@@ -35,5 +35,5 @@
 		/*Penta constructors and destructor: {{{*/
 		Penta(){};
-		Penta(int penta_id,int penta_sid,IoModel* iomodel,int nummodels);
+		Penta(int penta_id,int penta_sid,int penta_lid,IoModel* iomodel,int nummodels);
 		~Penta();
 		/*}}}*/
@@ -44,6 +44,10 @@
 		/*}}}*/
 		/*Penta routines:{{{*/
-		void           AddBasalInput(int input_enum, IssmDouble* values, int interpolation_enum);
-		void           AddInput(int input_enum, IssmDouble* values, int interpolation_enum);
+		void           AddBasalInput2(int input_enum, IssmDouble* values, int interpolation_enum);
+		void           AddInput2(int input_enum, IssmDouble* values, int interpolation_enum);
+		void           AddControlInput(int input_enum,Inputs2* inputs2,IoModel* iomodel,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max, int interpolation_enum,int id);
+		void           ControlInputExtrude(int enum_type,int start);
+		void           DatasetInputExtrude(int enum_type,int start);
+		void           DatasetInputCreate(IssmDouble* array,int M,int N,int* individual_enums,int num_inputs,Inputs2* inputs2,IoModel* iomodel,int input_enum);
 		void           AverageOntoPartition(Vector<IssmDouble>* partition_contributions,Vector<IssmDouble>* partition_areas,IssmDouble* vertex_response,IssmDouble* qmu_part);
 		void           BasalNodeIndices(int* pnumindices,int** pindices,int finiteelement);
@@ -58,5 +62,5 @@
 		void           ComputeSigmaNN(){_error_("not implemented yet");};
 		void           ComputeStressTensor();
-		void           Configure(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters);
+		void           Configure(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters,Inputs2* inputs2in);
 		void           ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index,int offset,int N,int M);
 		void           ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index);
@@ -79,4 +83,10 @@
 		IssmDouble		GetIcefrontArea();
 		void           GetIcefrontCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum);
+		Input2*        GetInput2(int enumtype);
+		Input2*        GetInput2(int enumtype,IssmDouble time);
+		Input2*        GetInput2(int inputenum,IssmDouble start_time,IssmDouble end_time){_error_("not implemented yet!");};
+		void        GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value);
+		void        GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value);
+		DatasetInput2* GetDatasetInput2(int inputenum);
 		void           GetInputValue(IssmDouble* pvalue,Vertex* vertex,int enumtype);
 		void           GetInputValue(IssmDouble* pvalue,Node* node,int enumtype);
@@ -95,4 +105,5 @@
 		void           GetVerticesCoordinatesTop(IssmDouble** pxyz_list);
 		IssmDouble     GroundedArea(bool scaled);
+		IssmDouble     GroundinglineMassFlux(bool scaled);
 		IssmDouble		IcefrontMassFluxLevelset(bool scaled);
 		IssmDouble     IceVolume(bool scaled);
@@ -107,6 +118,4 @@
 		bool           IsIcefront(void);
 		bool           IsNodeOnShelfFromFlags(IssmDouble* flags);
-		bool	         IsOnBase(void);
-		bool	         IsOnSurface(void);
 		bool           IsZeroLevelset(int levelset_enum);
 		void           JacobianDeterminant(IssmDouble*  Jdet, IssmDouble* xyz_list,Gauss* gauss);
@@ -154,4 +163,7 @@
 		void           ResetHooks();
 		void				RignotMeltParameterization();
+		void           SetElementInput(int enum_in,IssmDouble values);
+		void           SetElementInput(Inputs2* inputs2,int enum_in,IssmDouble values);
+		void           SetElementInput(Inputs2* inputs2,int numindices,int* indices,IssmDouble* values,int enum_in);
 		void           SetControlInputsFromVector(IssmDouble* vector,int control_enum,int control_index,int offset, int N,int M);
 		void           SetControlInputsFromVector(IssmDouble* vector,int control_enum,int control_index);
@@ -174,5 +186,5 @@
 		IssmDouble     TotalGroundedBmb(bool scaled);
 		IssmDouble     TotalSmb(bool scaled);
-		void           Update(int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
+		void           Update(Inputs2* inputs,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
 		void           UpdateConstraintsExtrudeFromBase(void);
 		void           UpdateConstraintsExtrudeFromTop(void);
@@ -184,5 +196,5 @@
 		void           VerticalSegmentIndices(int** pindices,int* pnumseg);
 		void           VerticalSegmentIndicesBase(int** pindices,int* pnumseg);
-		void           ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input);
+		void           ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input);
 
 		#ifdef _HAVE_DAKOTA_
Index: /issm/trunk/src/c/classes/Elements/Seg.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Seg.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Seg.cpp	(revision 24686)
@@ -13,4 +13,6 @@
 #include <string.h>
 #include "../classes.h"
+#include "../Inputs2/SegInput2.h"
+#include "../Inputs2/TriaInput2.h"
 #include "../../shared/shared.h"
 /*}}}*/
@@ -19,16 +21,22 @@
 #define NUMVERTICES 2
 /*Constructors/destructor/copy*/
-Seg::Seg(int seg_id, int seg_sid,IoModel* iomodel,int nummodels)/*{{{*/
+Seg::Seg(int seg_id, int seg_sid,int seg_lid,IoModel* iomodel,int nummodels)/*{{{*/
 		:ElementHook(nummodels,seg_id,NUMVERTICES,iomodel){
+
+			this->iscollapsed = 0;
+			this->collapsed_ids[0] = -1;
+			this->collapsed_ids[1] = -1;
 
 			/*id: */
 			this->id  = seg_id;
 			this->sid = seg_sid;
+			this->lid = seg_lid;
+
+			/*surface and base*/
+			this->isonsurface = false;
+			this->isonbase    = false;
 
 			//this->parameters: we still can't point to it, it may not even exist. Configure will handle this.
 			this->parameters = NULL;
-
-			/*intialize inputs: */
-			this->inputs  = new Inputs();
 
 			/*initialize pointers:*/
@@ -39,4 +47,8 @@
 			/*Only allocate pointer*/
 			this->element_type_list=xNew<int>(nummodels);
+
+			/*surface and base*/
+			this->isonsurface = true;
+			this->isonbase    = true;
 		}
 /*}}}*/
@@ -51,4 +63,8 @@
 
 	seg=new Seg();
+
+	seg->iscollapsed=this->iscollapsed;
+	seg->collapsed_ids[0]=this->collapsed_ids[0];
+	seg->collapsed_ids[1]=this->collapsed_ids[1];
 
 	//deal with TriaRef mother class
@@ -82,6 +98,7 @@
 	seg->id  = this->id;
 	seg->sid = this->sid;
-	if(this->inputs) seg->inputs = (Inputs*)(this->inputs->Copy());
-	else seg->inputs=new Inputs();
+	seg->lid = this->lid;
+	seg->isonbase  = this->isonbase;
+	seg->isonsurface  = this->isonsurface;
 
 	/*point parameters: */
@@ -106,4 +123,9 @@
 
 	MARSHALLING_ENUM(SegEnum);
+	MARSHALLING(this->iscollapsed);
+	MARSHALLING(this->isonsurface);
+	MARSHALLING(this->isonbase);
+	MARSHALLING(this->collapsed_ids[0]);
+	MARSHALLING(this->collapsed_ids[1]);
 
 	/*Call parent classes: */
@@ -142,5 +164,6 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&levelset[0],levelsetenum);
+	Element::GetInputListOnVertices(&levelset[0],levelsetenum);
+
 	/* Get nodes where there is no ice */
 	nrfrontnodes=0;
@@ -163,4 +186,73 @@
 	*pxyz_front=xyz_front;
 }/*}}}*/
+Input2*    Seg::GetInput2(int inputenum){/*{{{*/
+
+	if(this->iscollapsed){
+		TriaInput2* input = this->inputs2->GetTriaInput(inputenum);
+		if(!input) return input;
+
+		/*Intermediaries*/
+		int numindices;
+		int indices[7];
+
+		/*Check interpolation*/
+		int interpolation = input->GetInterpolation();
+		switch(interpolation){
+			case P0Enum:
+				numindices = 1;
+				indices[0] = this->lid;
+				input->Serve(numindices,&indices[0]);
+				break;
+			case P1Enum:
+				numindices = 2;
+				for(int i=0;i<numindices;i++) indices[i] = vertices[i]->lid;
+				input->Serve(numindices,&indices[0]);
+				break;
+			case P1DGEnum:
+			case P1bubbleEnum:
+			default:
+				input->ServeCollapsed(this->lid,this->collapsed_ids[0],this->collapsed_ids[1]);
+				break;
+		}
+		/*Flag as collapsed for later use*/
+		input->SetServeCollapsed(true);
+
+		return input;
+	}
+	else{
+		SegInput2* input = this->inputs2->GetSegInput(inputenum);
+		if(!input) return input;
+
+		/*Intermediaries*/
+		int numindices;
+		int indices[7];
+
+		/*Check interpolation*/
+		int interpolation = input->GetInterpolation();
+		switch(interpolation){
+			case P0Enum:
+				numindices = 1;
+				indices[0] = this->lid;
+				input->Serve(numindices,&indices[0]);
+				break;
+			case P1Enum:
+				numindices = 3;
+				for(int i=0;i<3;i++) indices[i] = vertices[i]->lid;
+				input->Serve(numindices,&indices[0]);
+				break;
+			case P1DGEnum:
+				numindices = 3;
+				input->Serve(this->lid,numindices);
+				break;
+			default:
+				input->Serve(this->lid,this->GetNumberOfNodes(interpolation));
+		}
+
+		return input;
+	}
+}/*}}}*/
+Input2*    Seg::GetInput2(int inputenum,IssmDouble time){/*{{{*/
+	_error_("not implemented yet");
+}/*}}}*/
 IssmDouble Seg::GetGroundedPortion(IssmDouble* xyz_list){/*{{{*/
 	/*Computeportion of the element that is grounded*/ 
@@ -172,5 +264,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -209,4 +301,44 @@
 
 }/*}}}*/
+void       Seg::GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value){/*{{{*/
+
+	/*Checks in debugging mode*/
+	_assert_(pvalue);
+
+	/* Start looping on the number of vertices: */
+	if(input){
+		GaussSeg gauss;
+		for(int iv=0;iv<NUMVERTICES;iv++){
+			gauss.GaussVertex(iv);
+			input->GetInputValue(&pvalue[iv],&gauss);
+		}
+	}
+	else{
+		for(int iv=0;iv<NUMVERTICES;iv++) pvalue[iv] = default_value;
+	}
+}
+/*}}}*/
+void       Seg::GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value){/*{{{*/
+
+	/*Checks in debugging mode*/
+	_assert_(pvalue);
+
+	/*What type of finite element are we dealing with?*/
+	int fe       = this->FiniteElement();
+	int numnodes = this->GetNumberOfNodes();
+
+	/* Start looping on the number of vertices: */
+	if(input){
+		GaussSeg gauss;
+		for(int iv=0;iv<numnodes;iv++){
+			gauss.GaussNode(fe,iv);
+			input->GetInputValue(&pvalue[iv],&gauss);
+		}
+	}
+	else{
+		for(int iv=0;iv<numnodes;iv++) pvalue[iv] = default_value;
+	}
+}
+/*}}}*/
 bool       Seg::IsIcefront(void){/*{{{*/
 
@@ -216,5 +348,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&ls[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&ls[0],MaskIceLevelsetEnum);
 
 	/* If only one vertex has ice, there is an ice front here */
Index: /issm/trunk/src/c/classes/Elements/Seg.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Seg.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Seg.h	(revision 24686)
@@ -1,3 +1,3 @@
-/*! \file Seg.h 
+/*! \file Seg.h
  *  \brief: header file for seg object
  */
@@ -28,8 +28,10 @@
 
 	public:
+		int iscollapsed;
+		int collapsed_ids[2];
 
 		/*Seg constructors, destructors {{{*/
 		Seg(){};
-		Seg(int seg_id,int seg_sid,IoModel* iomodel,int nummodels);
+		Seg(int seg_id,int seg_sid,int seg_lid,IoModel* iomodel,int nummodels);
 		~Seg();
 		/*}}}*/
@@ -40,6 +42,4 @@
 		/*}}}*/
 		/*Element virtual functions definitions: {{{*/
-		void        AddBasalInput(int input_enum, IssmDouble* values, int interpolation_enum){_error_("not implemented yet");};
-		void        AddInput(int input_enum, IssmDouble* values, int interpolation_enum){_error_("not implemented yet");};
 		void        AverageOntoPartition(Vector<IssmDouble>* partition_contributions,Vector<IssmDouble>* partition_areas,IssmDouble* vertex_response,IssmDouble* qmu_part){_error_("not implemented yet");};
 		void        CalvingRateLevermann(void){_error_("not implemented yet");};
@@ -49,5 +49,5 @@
 		void        ComputeSigmaNN(){_error_("not implemented yet");};
 		void        ComputeStressTensor(){_error_("not implemented yet");};
-		void        Configure(Elements* elements,Loads* loads,Nodes* nodesin,Vertices* verticesin,Materials* materials,Parameters* parameters){_error_("not implemented yet");};
+		void        Configure(Elements* elements,Loads* loads,Nodes* nodesin,Vertices* verticesin,Materials* materials,Parameters* parameters,Inputs2* inputs2in){_error_("not implemented yet");};
 		void        ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index,int offset,int N,int M){_error_("not implemented yet");};
 		void        ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index){_error_("not implemented yet");};
@@ -63,4 +63,9 @@
 		IssmDouble  GetGroundedPortion(IssmDouble* xyz_list);
 		void		   GetIcefrontCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum);
+		Input2*     GetInput2(int enumtype);
+		Input2*     GetInput2(int enumtype,IssmDouble time);
+		Input2*     GetInput2(int inputenum,IssmDouble start_time,IssmDouble end_time){_error_("not implemented yet!");};
+		void        GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value);
+		void        GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value);
 		void        GetInputValue(IssmDouble* pvalue,Vertex* vertex,int enumtype){_error_("not implemented yet");};
 		void		   GetLevelCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum,IssmDouble level){_error_("not implemented");};
@@ -86,6 +91,4 @@
 		bool		   IsIcefront(void);
 		bool        IsNodeOnShelfFromFlags(IssmDouble* flags){_error_("not implemented yet");};
-		bool        IsOnBase(){_error_("not implemented yet");};
-		bool        IsOnSurface(){_error_("not implemented yet");};
 		bool        IsZeroLevelset(int levelset_enum){_error_("not implemented");};
 		void        JacobianDeterminant(IssmDouble*  Jdet, IssmDouble* xyz_list,Gauss* gauss);
@@ -148,5 +151,5 @@
 		IssmDouble  TotalGroundedBmb(bool scaled){_error_("not implemented yet");};
 		IssmDouble  TotalSmb(bool scaled){_error_("not implemented yet");};
-		void        Update(int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement){_error_("not implemented yet");};
+		void        Update(Inputs2* inputs2,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement){_error_("not implemented yet");};
 		void        UpdateConstraintsExtrudeFromBase(){_error_("not implemented");};
 		void        UpdateConstraintsExtrudeFromTop(){_error_("not implemented");};
@@ -158,5 +161,4 @@
 		void        VerticalSegmentIndices(int** pindices,int* pnumseg){_error_("not implemented yet");};
 		void        VerticalSegmentIndicesBase(int** pindices,int* pnumseg){_error_("not implemented yet");};
-		void        ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){_error_("not implemented yet");};
 		IssmDouble     GetArea3D(void){_error_("not implemented yet!");};
 		IssmDouble     GetAreaSpherical(void){_error_("not implemented yet!");};
Index: /issm/trunk/src/c/classes/Elements/Tetra.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Tetra.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Tetra.cpp	(revision 24686)
@@ -13,4 +13,5 @@
 #include <string.h>
 #include "../classes.h"
+#include "../Inputs2/ElementInput2.h"
 #include "../../shared/shared.h"
 /*}}}*/
@@ -20,5 +21,5 @@
 
 /*Constructors/destructor/copy*/
-Tetra::Tetra(int tet_id, int tet_sid,IoModel* iomodel,int nummodels)/*{{{*/
+Tetra::Tetra(int tet_id, int tet_sid,int tet_lid,IoModel* iomodel,int nummodels)/*{{{*/
 		:ElementHook(nummodels,tet_id,NUMVERTICES,iomodel){
 
@@ -26,10 +27,12 @@
 			this->id  = tet_id;
 			this->sid = tet_sid;
+			this->lid = tet_lid;
+
+			/*surface and base*/
+			this->isonsurface = false;
+			this->isonbase    = false;
 
 			//this->parameters: we still can't point to it, it may not even exist. Configure will handle this.
 			this->parameters = NULL;
-
-			/*intialize inputs: */
-			this->inputs  = new Inputs();
 
 			/*initialize pointers:*/
@@ -40,4 +43,18 @@
 			/*Only allocate pointer*/
 			this->element_type_list=xNew<int>(nummodels);
+
+			/*surface and base*/
+			_assert_(iomodel->Data("md.mesh.vertexonsurface"));
+			_assert_(iomodel->Data("md.mesh.vertexonbase"));
+			this->isonsurface = false;
+			this->isonbase    = false;
+			IssmDouble sum = 0.;
+			for(int i=0;i<NUMVERTICES;i++) sum += iomodel->Data("md.mesh.vertexonsurface")[reCast<int>(iomodel->elements[(tet_id-1)*NUMVERTICES+i])-1];
+			_assert_(sum>=0 && sum<4);
+			if(sum>2.5) this->isonsurface = true;
+			sum = 0.;
+			for(int i=0;i<NUMVERTICES;i++) sum += iomodel->Data("md.mesh.vertexonbase")[reCast<int>(iomodel->elements[(tet_id-1)*NUMVERTICES+i])-1];
+			_assert_(sum>=0 && sum<4);
+			if(sum>2.5) this->isonbase = true;
 		}
 /*}}}*/
@@ -83,6 +100,7 @@
 	tetra->id  = this->id;
 	tetra->sid = this->sid;
-	if(this->inputs) tetra->inputs = (Inputs*)(this->inputs->Copy());
-	else tetra->inputs=new Inputs();
+	tetra->lid = this->lid;
+	tetra->isonbase  = this->isonbase;
+	tetra->isonsurface  = this->isonsurface;
 
 	/*point parameters: */
@@ -103,4 +121,6 @@
 
 	MARSHALLING_ENUM(TetraEnum);
+	MARSHALLING(this->isonsurface);
+	MARSHALLING(this->isonbase);
 
 	/*Call parent classes: */
@@ -115,12 +135,5 @@
 /*}}}*/
 
-void     Tetra::AddInput(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
-
-	/*Call inputs method*/
-	_assert_(this->inputs);
-	this->inputs->AddInput(new TetraInput(input_enum,values,interpolation_enum));
-}
-/*}}}*/
-void     Tetra::Configure(Elements* elementsin, Loads* loadsin, Nodes* nodesin,Vertices* verticesin, Materials* materialsin, Parameters* parametersin){/*{{{*/
+void     Tetra::Configure(Elements* elementsin, Loads* loadsin, Nodes* nodesin,Vertices* verticesin, Materials* materialsin, Parameters* parametersin,Inputs2* inputs2in){/*{{{*/
 
 	int analysis_counter;
@@ -146,7 +159,5 @@
 	/*point parameters to real dataset: */
 	this->parameters=parametersin;
-
-	/*get inputs configured too: */
-	this->inputs->Configure(parameters);
+	this->inputs2=inputs2in;
 }
 /*}}}*/
@@ -183,5 +194,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
 
 	for(int i=0;i<4;i++){
@@ -203,5 +214,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&values[0],MaskIceLevelsetEnum);
 
 	for(int i=0;i<4;i++){
@@ -223,5 +234,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
 
 	for(int i=0;i<4;i++){
@@ -246,7 +257,53 @@
 }
 /*}}}*/
+Input2*    Tetra::GetInput2(int inputenum){/*{{{*/
+	_error_("not implemented yet");
+}/*}}}*/
+Input2*    Tetra::GetInput2(int inputenum,IssmDouble time){/*{{{*/
+	_error_("not implemented yet");
+}/*}}}*/
+void       Tetra::GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value){/*{{{*/
+
+	/*Checks in debugging mode*/
+	_assert_(pvalue);
+
+	/* Start looping on the number of vertices: */
+	if(input){
+		GaussTetra gauss;
+		for(int iv=0;iv<NUMVERTICES;iv++){
+			gauss.GaussVertex(iv);
+			input->GetInputValue(&pvalue[iv],&gauss);
+		}
+	}
+	else{
+		for(int iv=0;iv<NUMVERTICES;iv++) pvalue[iv] = default_value;
+	}
+}
+/*}}}*/
+void       Tetra::GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value){/*{{{*/
+
+	/*Checks in debugging mode*/
+	_assert_(pvalue);
+
+	/*What type of finite element are we dealing with?*/
+	int fe       = this->FiniteElement();
+	int numnodes = this->GetNumberOfNodes();
+
+	/* Start looping on the number of vertices: */
+	if(input){
+		GaussTetra gauss;
+		for(int iv=0;iv<numnodes;iv++){
+			gauss.GaussNode(fe,iv);
+			input->GetInputValue(&pvalue[iv],&gauss);
+		}
+	}
+	else{
+		for(int iv=0;iv<numnodes;iv++) pvalue[iv] = default_value;
+	}
+}
+/*}}}*/
 void     Tetra::GetInputValue(IssmDouble* pvalue,Node* node,int enumtype){/*{{{*/
 
-	Input* input=inputs->GetInput(enumtype);
+	Input2* input=this->GetInput2(enumtype);
 	if(!input) _error_("No input of type " << EnumToStringx(enumtype) << " found in tria");
 
@@ -306,5 +363,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
 	sum = values[0]+values[1]+values[2]+values[3];
 
@@ -325,5 +382,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
 	sum = values[0]+values[1]+values[2]+values[3];
 
@@ -363,102 +420,4 @@
 		tetra_vertex_ids[i]=iomodel->elements[NUMVERTICES*index+i]; //ids for vertices are in the elements array from Matlab
 	}
-
-	/*Control Inputs*/
-	if (control_analysis){
-		iomodel->FindConstant(&controls,NULL,"md.inversion.control_parameters");
-		for(i=0;i<num_control_type;i++){
-			_assert_(controls[i]);
-			int control = StringToEnumx(controls[i]);
-			switch(control){
-				case BalancethicknessThickeningRateEnum:
-					if (iomodel->Data("md.balancethickness.thickening_rate")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.balancethickness.thickening_rate")[tetra_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(BalancethicknessThickeningRateEnum,TetraInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case VxEnum:
-					if (iomodel->Data("md.initialization.vx")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.initialization.vx")[tetra_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(VxEnum,TetraInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case VyEnum:
-					if (iomodel->Data("md.initialization.vy")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.initialization.vy")[tetra_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(VyEnum,TetraInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case FrictionCoefficientEnum:
-					if (iomodel->Data("md.friction.coefficient")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.friction.coefficient")[tetra_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(FrictionCoefficientEnum,TetraInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case MaterialsRheologyBbarEnum:
-					if(iomodel->Data("md.materials.rheology_B")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.materials.rheology_B")[tetra_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(MaterialsRheologyBEnum,TetraInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case DamageDbarEnum:
-					if(iomodel->Data("md.damage.D")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.damage.D")[tetra_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tetra_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(DamageDEnum,TetraInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				default:
-					_error_("Control " << EnumToStringx(control) << " not implemented yet");
-			}
-		}
-		for(i=0;i<num_control_type;i++) xDelete<char>(controls[i]);
-		xDelete<char*>(controls);
-	}
-
-	/*Need to know the type of approximation for this element*/
-	if(iomodel->Data("md.flowequation.element_equation")){
-		this->inputs->AddInput(new IntInput(ApproximationEnum,IoCodeToEnumElementEquation(reCast<int>(iomodel->Data("md.flowequation.element_equation")[index]))));
-	}
-
-	/*DatasetInputs*/
-	if (control_analysis && iomodel->Data("md.inversion.cost_functions_coefficients")) {
-
-		/*Generate cost functions associated with the iomodel*/
-		char**	cost_functions			= NULL;
-		int*		cost_functions_enums = NULL;
-		int		num_cost_functions;
-
-		iomodel->FindConstant(&num_cost_functions,"md.inversion.num_cost_functions");
-		iomodel->FindConstant(&cost_functions,&num_cost_functions,"md.inversion.cost_functions");
-		if(num_cost_functions<1) _error_("No cost functions found");
-		cost_functions_enums=xNew<int>(num_cost_functions);
-		for(j=0;j<num_cost_functions;j++){ cost_functions_enums[j]=StringToEnumx(cost_functions[j]); }
-
-		/*Create inputs and add to DataSetInput*/
-		DatasetInput* datasetinput=new DatasetInput(InversionCostFunctionsCoefficientsEnum);
-		for(i=0;i<num_responses;i++){
-			for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.inversion.cost_functions_coefficients")[(tetra_vertex_ids[j]-1)*num_responses+i];
-			datasetinput->AddInput(new TetraInput(InversionCostFunctionsCoefficientsEnum,nodeinputs,P1Enum),cost_functions_enums[i]);
-		}
-
-		/*Add datasetinput to element inputs*/
-		this->inputs->AddInput(datasetinput);
-
-		/*Clean up cost functions*/
-		xDelete<int>(cost_functions_enums);
-		for(int j=0;j<num_cost_functions;j++) xDelete<char>(cost_functions[j]); 
-		xDelete<char*>(cost_functions);
-	}
 }
 /*}}}*/
@@ -483,5 +442,5 @@
 
 	/*Add input to the element: */
-	this->inputs->AddInput(new TetraInput(enum_type,values,this->element_type));
+	this->AddInput2(enum_type,values,this->element_type);
 
 	/*Free ressources:*/
@@ -494,5 +453,5 @@
 	/*Retrieve all inputs and parameters*/
 	IssmDouble ls[NUMVERTICES];
-	GetInputListOnVertices(&ls[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&ls[0],MaskIceLevelsetEnum);
 
 	/* If only one vertex has ice, there is an ice front here */
@@ -504,12 +463,4 @@
 	return false;
 }/*}}}*/
-bool     Tetra::IsOnBase(){/*{{{*/
-	return HasFaceOnBase();
-}
-/*}}}*/
-bool     Tetra::IsOnSurface(){/*{{{*/
-	return HasFaceOnSurface();
-}
-/*}}}*/
 void     Tetra::JacobianDeterminant(IssmDouble* pJdet,IssmDouble* xyz_list,Gauss* gauss){/*{{{*/
 
@@ -739,12 +690,12 @@
 
 	/*For FS only: we want the CS to be tangential to the bedrock*/
-	inputs->GetInputValue(&approximation,ApproximationEnum);
+	this->GetInput2Value(&approximation,ApproximationEnum);
 	if(!HasNodeOnBase() ||  approximation!=FSApproximationEnum) return;
 
 	//printf("element number %i \n",this->id);
 	/*Get inputs*/
-	Input* slopex_input=inputs->GetInput(BedSlopeXEnum); _assert_(slopex_input);
-	Input* slopey_input=inputs->GetInput(BedSlopeYEnum); _assert_(slopey_input);
-	Input* groundedicelevelset_input=inputs->GetInput(MaskGroundediceLevelsetEnum); _assert_(groundedicelevelset_input);
+	Input2* slopex_input=this->GetInput2(BedSlopeXEnum); _assert_(slopex_input);
+	Input2* slopey_input=this->GetInput2(BedSlopeYEnum); _assert_(slopey_input);
+	Input2* groundedicelevelset_input=this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(groundedicelevelset_input);
 	vertexonbase = xNew<IssmDouble>(numnodes);
 	this->GetInputListOnNodesVelocity(&vertexonbase[0],MeshVertexonbaseEnum);
@@ -844,5 +795,4 @@
 	Tria* tria=new Tria();
 	tria->id=this->id;
-	tria->inputs=(Inputs*)this->inputs->SpawnTriaInputs(index1,index2,index3);
 	tria->parameters=this->parameters;
 	tria->element_type=P1Enum; //Only P1 CG for now (TO BE CHANGED)
@@ -864,5 +814,5 @@
 }
 /*}}}*/
-void     Tetra::Update(int index,IoModel* iomodel,int analysis_counter,int analysis_type,int finiteelement_type){ /*{{{*/
+void     Tetra::Update(Inputs2* inputs2,int index,IoModel* iomodel,int analysis_counter,int analysis_type,int finiteelement_type){ /*{{{*/
 
 	/*Intermediaries*/
@@ -992,5 +942,5 @@
 }
 /*}}}*/
-void     Tetra::ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){/*{{{*/
+void     Tetra::ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input){/*{{{*/
 
 	/*Intermediaries*/
Index: /issm/trunk/src/c/classes/Elements/Tetra.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Tetra.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Tetra.h	(revision 24686)
@@ -1,3 +1,3 @@
-/*! \file Tetra.h 
+/*! \file Tetra.h
  *  \brief: header file for seg object
  */
@@ -31,5 +31,5 @@
 		/*Tetra constructors, destructors {{{*/
 		Tetra(){};
-		Tetra(int tet_id,int tet_sid,IoModel* iomodel,int nummodels);
+		Tetra(int tet_id,int tet_sid,int tet_lid,IoModel* iomodel,int nummodels);
 		~Tetra();
 		/*}}}*/
@@ -40,6 +40,4 @@
 		/*}}}*/
 		/*Element virtual functions definitions: {{{*/
-		void        AddBasalInput(int input_enum, IssmDouble* values, int interpolation_enum){_error_("not implemented yet");};
-		void        AddInput(int input_enum, IssmDouble* values, int interpolation_enum);
 		void        AverageOntoPartition(Vector<IssmDouble>* partition_contributions,Vector<IssmDouble>* partition_areas,IssmDouble* vertex_response,IssmDouble* qmu_part){_error_("not implemented yet");};
 		void        CalvingRateLevermann(void){_error_("not implemented yet");};
@@ -49,5 +47,5 @@
 		void        ComputeDeviatoricStressTensor(){_error_("not implemented yet");};
 		void        ComputeEsaStrainAndVorticity(){_error_("not implemented yet!");};
-		void        Configure(Elements* elements,Loads* loads,Nodes* nodesin,Vertices* verticesin,Materials* materials,Parameters* parameters);
+		void        Configure(Elements* elements,Loads* loads,Nodes* nodesin,Vertices* verticesin,Materials* materials,Parameters* parameters,Inputs2* inputs2in);
 		void        ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index,int offset,int N,int M){_error_("not implemented yet");};
 		void        ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index){_error_("not implemented yet");};
@@ -62,6 +60,6 @@
 		IssmDouble  FloatingArea(bool scaled){_error_("not implemented yet");};
 		void        FSContactMigration(Vector<IssmDouble>* vertexgrounded,Vector<IssmDouble>* vertexfloating){_error_("not implemented yet");};
-		IssmDouble     GetArea3D(void){_error_("not implemented yet!");};
-		IssmDouble     GetAreaSpherical(void){_error_("not implemented yet!");};
+		IssmDouble  GetArea3D(void){_error_("not implemented yet!");};
+		IssmDouble  GetAreaSpherical(void){_error_("not implemented yet!");};
 		Element*    GetBasalElement(void){_error_("not implemented yet");};
 		int         GetElementType(void);
@@ -69,4 +67,9 @@
 		IssmDouble  GetGroundedPortion(IssmDouble* xyz_list){_error_("not implemented yet");};
 		void		   GetIcefrontCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum){_error_("not implemented yet");};
+		Input2*     GetInput2(int enumtype);
+		Input2*     GetInput2(int enumtype,IssmDouble time);
+		Input2*     GetInput2(int inputenum,IssmDouble start_time,IssmDouble end_time){_error_("not implemented yet!");};
+		void        GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value);
+		void        GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value);
 		void        GetInputValue(IssmDouble* pvalue,Node* node,int enumtype);
 		void		   GetLevelCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum,IssmDouble level){_error_("not implemented yet");};
@@ -88,6 +91,4 @@
 		bool		   IsIcefront(void);
 		bool        IsNodeOnShelfFromFlags(IssmDouble* flags){_error_("not implemented yet");};
-		bool        IsOnBase();
-		bool        IsOnSurface();
 		bool        IsZeroLevelset(int levelset_enum){_error_("not implemented");};
 		void        InputDepthAverageAtBase(int enum_type,int average_enum_type){_error_("not implemented yet");};
@@ -157,5 +158,5 @@
 		IssmDouble  TotalGroundedBmb(bool scaled){_error_("not implemented yet");};
 		IssmDouble  TotalSmb(bool scaled){_error_("not implemented yet");};
-		void        Update(int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
+		void        Update(Inputs2* inputs2,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
 		void        UpdateConstraintsExtrudeFromBase(){_error_("not implemented");};
 		void        UpdateConstraintsExtrudeFromTop(){_error_("not implemented");};
@@ -167,5 +168,5 @@
 		void        VerticalSegmentIndices(int** pindices,int* pnumseg){_error_("not implemented yet");};
 		void        VerticalSegmentIndicesBase(int** pindices,int* pnumseg){_error_("not implemented yet");};
-		void        ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input);
+		void        ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input);
 
 #ifdef _HAVE_GIAIVINS_
Index: /issm/trunk/src/c/classes/Elements/Tria.cpp
===================================================================
--- /issm/trunk/src/c/classes/Elements/Tria.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Tria.cpp	(revision 24686)
@@ -14,4 +14,9 @@
 #include <math.h>
 #include "../classes.h"
+#include "../Inputs2/TriaInput2.h"
+#include "../Inputs2/PentaInput2.h"
+#include "../Inputs2/ControlInput2.h"
+#include "../Inputs2/DatasetInput2.h"
+#include "../Inputs2/TransientInput2.h"
 #include "../../shared/shared.h"
 #ifdef _HAVE_GIAIVINS_
@@ -25,16 +30,16 @@
 
 /*Constructors/destructor/copy*/
-Tria::Tria(int tria_id, int tria_sid, IoModel* iomodel,int nummodels)/*{{{*/
+Tria::Tria(int tria_id,int tria_sid,int tria_lid,IoModel* iomodel,int nummodels)/*{{{*/
 	:ElementHook(nummodels,tria_id,NUMVERTICES,iomodel){
+
+		this->iscollapsed = 0;
 
 		/*id: */
 		this->id  = tria_id;
 		this->sid = tria_sid;
-
-		//this->parameters: we still can't point to it, it may not even exist. Configure will handle this.
+		this->lid = tria_lid;
+
+		/*this->parameters: we still can't point to it, it may not even exist. Configure will handle this.*/
 		this->parameters = NULL;
-
-		/*intialize inputs: */
-		this->inputs  = new Inputs();
 
 		/*initialize pointers:*/
@@ -48,6 +53,30 @@
 		else this->element_type_list = NULL;
 
-}
-/*}}}*/
+		/*surface and base*/
+		IssmDouble sum;
+		this->isonsurface = false;
+		this->isonbase    = false;
+		switch(iomodel->domaintype){
+			case Domain2DverticalEnum:
+				_assert_(iomodel->Data("md.mesh.vertexonsurface"));
+				_assert_(iomodel->Data("md.mesh.vertexonbase"));
+				sum = 0.;
+				for(int i=0;i<NUMVERTICES;i++) sum += iomodel->Data("md.mesh.vertexonsurface")[reCast<int>(iomodel->elements[(tria_id-1)*NUMVERTICES+i])-1];
+				_assert_(sum>=0 && sum<3);
+				if(sum>1.) this->isonsurface = true;
+				sum = 0.;
+				for(int i=0;i<NUMVERTICES;i++) sum += iomodel->Data("md.mesh.vertexonbase")[reCast<int>(iomodel->elements[(tria_id-1)*NUMVERTICES+i])-1];
+				_assert_(sum>=0 && sum<3);
+				if(sum>1.) this->isonbase = true;
+				break;
+			case Domain2DhorizontalEnum:
+			case Domain3DsurfaceEnum:
+				this->isonsurface = true;
+				this->isonbase    = true;
+				break;
+			default: _error_("mesh "<<EnumToStringx(iomodel->domaintype)<<" not supported yet");
+		}
+
+}/*}}}*/
 Tria::~Tria(){/*{{{*/
 	this->parameters=NULL;
@@ -60,4 +89,6 @@
 
 	tria=new Tria();
+
+	tria->iscollapsed=this->iscollapsed;
 
 	//deal with TriaRef mother class
@@ -91,6 +122,7 @@
 	tria->id  = this->id;
 	tria->sid = this->sid;
-	if(this->inputs) tria->inputs = (Inputs*)(this->inputs->Copy());
-	else tria->inputs=new Inputs();
+	tria->lid = this->lid;
+	tria->isonbase  = this->isonbase;
+	tria->isonsurface  = this->isonsurface;
 
 	/*point parameters: */
@@ -114,4 +146,7 @@
 
 	MARSHALLING_ENUM(TriaEnum);
+	MARSHALLING(this->iscollapsed);
+	MARSHALLING(this->isonsurface);
+	MARSHALLING(this->isonbase);
 
 	/*Call parent classes: */
@@ -127,8 +162,8 @@
 
 /*Other*/
-void       Tria::AddBasalInput(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
+void       Tria::AddBasalInput2(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
 
 	/*Call inputs method*/
-	_assert_(this->inputs);
+	_assert_(this->inputs2);
 
 	int domaintype;
@@ -136,21 +171,9 @@
 	switch(domaintype){
 		case Domain2DhorizontalEnum:
-			this->inputs->AddInput(new TriaInput(input_enum,values,interpolation_enum));
+			this->AddInput2(input_enum,values,interpolation_enum);
 			break;
 		case Domain2DverticalEnum:{
-			if(interpolation_enum==P1Enum){
-				IssmDouble values2[NUMVERTICES]={0.};
-				int        numindices;
-				int       *indices = NULL;
-				int        index = this->EdgeOnBaseIndex();
-				NodeOnEdgeIndices(&numindices,&indices,index,this->FiniteElement());
-				for(int i=0;i<numindices;i++){
-					values2[indices[i]] = values[i];
-				}
-				this->inputs->AddInput(new TriaInput(input_enum,values2,interpolation_enum));
-				xDelete<int>(indices);
-			}
-			else _error_("not implemented yet");
-			}
+			_error_("not implemented yet");
+										  }
 			break;
 		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
@@ -159,39 +182,76 @@
 }
 /*}}}*/
-void       Tria::AddInput(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
+void       Tria::AddInput2(int input_enum,IssmDouble* values, int interpolation_enum){/*{{{*/
+
+	/*Intermediaries*/
+	int vertexlids[NUMVERTICES];
 
 	/*Call inputs method*/
-	_assert_(this->inputs);
-	this->inputs->AddInput(new TriaInput(input_enum,values,interpolation_enum));
-}
-/*}}}*/
-void       Tria::AddControlInput(int input_enum,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max, int interpolation_enum,int id){/*{{{*/
+	if(!this->inputs2){
+		int* temp = xNew<int>(3);
+		_error_("inputs2 not set");
+	}
+	_assert_(this->inputs2);
+	switch(interpolation_enum){
+		case P1Enum:
+			for(int i=0;i<NUMVERTICES;i++) vertexlids[i]=this->vertices[i]->lid;
+			inputs2->SetTriaInput(input_enum,interpolation_enum,NUMVERTICES,vertexlids,values);
+			break;
+		case P1DGEnum:
+			for(int i=0;i<NUMVERTICES;i++) vertexlids[i]=this->vertices[i]->lid;
+			inputs2->SetTriaInput(input_enum,interpolation_enum,this->lid,NUMVERTICES,values);
+			break;
+		default:
+			inputs2->SetTriaInput(input_enum,interpolation_enum,this->lid,this->GetNumberOfNodes(interpolation_enum),values);
+	}
+
+}
+/*}}}*/
+void       Tria::AddControlInput(int input_enum,Inputs2* inputs2,IoModel* iomodel,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max, int interpolation_enum,int id){/*{{{*/
+
+	/*Intermediaries*/
+	int vertexlids[NUMVERTICES];
+
+	_assert_(iomodel->elements);
+	for(int i=0;i<NUMVERTICES;i++){
+		int vertexid =reCast<int>(iomodel->elements[NUMVERTICES*this->Sid()+i]); //ids for vertices are in the elements array from Matlab
+		vertexlids[i]=iomodel->my_vertices_lids[vertexid-1];
+	}
 
 	/*Call inputs method*/
-	_assert_(this->inputs);
-	this->inputs->AddInput(new ControlInput(input_enum,TriaInputEnum,values,values_min,values_max,interpolation_enum,id));
-}
-/*}}}*/
-void       Tria::DatasetInputCreate(IssmDouble* array,int M,int N,int* individual_enums,int num_inputs,IoModel* iomodel,int input_enum){/*{{{*/
-
+	switch(interpolation_enum){
+		case P1Enum:
+			inputs2->SetTriaControlInput(input_enum,TriaInput2Enum,interpolation_enum,id,NUMVERTICES,vertexlids,values,values_min,values_max);
+			break;
+		default:
+			_error_("Cannot add \""<<EnumToStringx(input_enum)<<"\" interpolation "<<EnumToStringx(interpolation_enum)<<" not supported");
+	}
+
+}
+/*}}}*/
+void       Tria::DatasetInputCreate(IssmDouble* array,int M,int N,int* individual_enums,int num_inputs,Inputs2* inputs2,IoModel* iomodel,int input_enum){/*{{{*/
+
+	/*Intermediaries*/
+	int        vertexsids[NUMVERTICES];
+	int        vertexlids[NUMVERTICES];
 	IssmDouble nodeinputs[NUMVERTICES];
-	if(num_inputs<1) _error_("Cannot create a DatasetInput of size <1");
-	if(M!=iomodel->numberofvertices) _error_("not supported yet");
-	if(N!=num_inputs) _error_("sizes are not consistent");
-
-	int        tria_vertex_ids[3];
-
-	for(int k=0;k<3;k++){
-		tria_vertex_ids[k]=reCast<int>(iomodel->elements[3*this->Sid()+k]); //ids for vertices are in the elements array from Matlab
-	}
+
+	/*Some sanity checks*/
+	if(num_inputs<1)                 _error_("Cannot create a DatasetInput of size <1");
+	if(M!=iomodel->numberofvertices) _error_("Input size not supported yet");
+	if(N!=num_inputs)                _error_("Sizes are not consistent");
+
+	/*Get indices*/
+	_assert_(iomodel->elements);
+	for(int i=0;i<NUMVERTICES;i++){
+		vertexsids[i] = reCast<int>(iomodel->elements[NUMVERTICES*this->Sid()+i])-1;
+		vertexlids[i] = iomodel->my_vertices_lids[vertexsids[i]];
+	}
+
 	/*Create inputs and add to DataSetInput*/
-	DatasetInput* datasetinput=new DatasetInput(input_enum);
 	for(int i=0;i<num_inputs;i++){
-		for(int j=0;j<NUMVERTICES;j++)nodeinputs[j]=array[(tria_vertex_ids[j]-1)*N+i];
-		datasetinput->AddInput(new TriaInput(input_enum,nodeinputs,P1Enum),individual_enums[i]);
-	}
-
-	/*Add datasetinput to element inputs*/
-	this->inputs->AddInput(datasetinput);
+		for(int j=0;j<NUMVERTICES;j++) nodeinputs[j]=array[vertexsids[j]*N+i];
+		inputs2->SetTriaDatasetInput(input_enum,individual_enums[i],P1Enum,NUMVERTICES,vertexlids,nodeinputs);
+	}
 }
 /*}}}*/
@@ -251,13 +311,13 @@
 
 	/*Retrieve all inputs and parameters we will need*/
-	Input* vx_input = inputs->GetInput(VxEnum); _assert_(vx_input);
-	Input* vy_input = inputs->GetInput(VyEnum); _assert_(vy_input);
-	Input* B_input  = inputs->GetInput(MaterialsRheologyBbarEnum);   _assert_(B_input);
-	Input* gr_input = inputs->GetInput(MaskGroundediceLevelsetEnum); _assert_(gr_input);
-	Input* bs_input = inputs->GetInput(BaseEnum);                    _assert_(bs_input);
-	Input* smax_fl_input = inputs->GetInput(CalvingStressThresholdFloatingiceEnum); _assert_(smax_fl_input);
-	Input* smax_gr_input = inputs->GetInput(CalvingStressThresholdGroundediceEnum); _assert_(smax_gr_input);
-	Input* n_input  = inputs->GetInput(MaterialsRheologyNEnum); _assert_(n_input);
-	Input* sl_input  = inputs->GetInput(SealevelEnum); _assert_(sl_input);
+	Input2* vx_input = this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = this->GetInput2(VyEnum); _assert_(vy_input);
+	Input2* B_input  = this->GetInput2(MaterialsRheologyBbarEnum);   _assert_(B_input);
+	Input2* gr_input = this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gr_input);
+	Input2* bs_input = this->GetInput2(BaseEnum);                    _assert_(bs_input);
+	Input2* smax_fl_input = this->GetInput2(CalvingStressThresholdFloatingiceEnum); _assert_(smax_fl_input);
+	Input2* smax_gr_input = this->GetInput2(CalvingStressThresholdGroundediceEnum); _assert_(smax_gr_input);
+	Input2* n_input  = this->GetInput2(MaterialsRheologyNEnum); _assert_(n_input);
+	Input2* sl_input  = this->GetInput2(SealevelEnum); _assert_(sl_input);
 
 
@@ -320,8 +380,8 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new TriaInput(CalvingratexEnum,&calvingratex[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(CalvingrateyEnum,&calvingratey[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(CalvingCalvingrateEnum,&calvingrate[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(SigmaVMEnum,&sigma_vm[0],P1Enum));
+	this->AddInput2(CalvingratexEnum,&calvingratex[0],P1DGEnum);
+	this->AddInput2(CalvingrateyEnum,&calvingratey[0],P1DGEnum);
+	this->AddInput2(CalvingCalvingrateEnum,&calvingrate[0],P1DGEnum);
+	this->AddInput2(SigmaVMEnum,&sigma_vm[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -351,17 +411,17 @@
 	IssmDouble constant_g     = this->FindParam(ConstantsGEnum);
 
-	Input*   H_input                 = inputs->GetInput(ThicknessEnum); _assert_(H_input);
-	Input*   bed_input               = inputs->GetInput(BedEnum); _assert_(bed_input);
-	Input*   surface_input           = inputs->GetInput(SurfaceEnum); _assert_(surface_input);
-	Input*	strainrateparallel_input  = inputs->GetInput(StrainRateparallelEnum);  _assert_(strainrateparallel_input);
-	Input*	strainrateeffective_input = inputs->GetInput(StrainRateeffectiveEnum); _assert_(strainrateeffective_input);
-	Input*	vx_input                  = inputs->GetInput(VxEnum); _assert_(vx_input);
-	Input*	vy_input                  = inputs->GetInput(VxEnum); _assert_(vy_input);
-	Input*   waterheight_input       = inputs->GetInput(WaterheightEnum); _assert_(waterheight_input);
-	Input*   s_xx_input              = inputs->GetInput(DeviatoricStressxxEnum);     _assert_(s_xx_input);
-	Input*   s_xy_input              = inputs->GetInput(DeviatoricStressxyEnum);     _assert_(s_xy_input);
-	Input*   s_yy_input              = inputs->GetInput(DeviatoricStressyyEnum);     _assert_(s_yy_input);
-	Input*	B_input  = inputs->GetInput(MaterialsRheologyBbarEnum);   _assert_(B_input);
-	Input*	n_input  = inputs->GetInput(MaterialsRheologyNEnum);   _assert_(n_input);
+	Input2*   H_input                 = this->GetInput2(ThicknessEnum); _assert_(H_input);
+	Input2*   bed_input               = this->GetInput2(BedEnum); _assert_(bed_input);
+	Input2*   surface_input           = this->GetInput2(SurfaceEnum); _assert_(surface_input);
+	Input2*	strainrateparallel_input  = this->GetInput2(StrainRateparallelEnum);  _assert_(strainrateparallel_input);
+	Input2*	strainrateeffective_input = this->GetInput2(StrainRateeffectiveEnum); _assert_(strainrateeffective_input);
+	Input2*	vx_input                  = this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2*	vy_input                  = this->GetInput2(VxEnum); _assert_(vy_input);
+	Input2*   waterheight_input       = this->GetInput2(WaterheightEnum); _assert_(waterheight_input);
+	Input2*   s_xx_input              = this->GetInput2(DeviatoricStressxxEnum);     _assert_(s_xx_input);
+	Input2*   s_xy_input              = this->GetInput2(DeviatoricStressxyEnum);     _assert_(s_xy_input);
+	Input2*   s_yy_input              = this->GetInput2(DeviatoricStressyyEnum);     _assert_(s_yy_input);
+	Input2*	B_input  = this->GetInput2(MaterialsRheologyBbarEnum);   _assert_(B_input);
+	Input2*	n_input  = this->GetInput2(MaterialsRheologyNEnum);   _assert_(n_input);
 
 	/*Loop over all elements of this partition*/
@@ -420,7 +480,7 @@
 	}
 
-	this->inputs->AddInput(new TriaInput(SurfaceCrevasseEnum,&surface_crevasse[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(BasalCrevasseEnum,&basal_crevasse[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(CrevasseDepthEnum,&crevasse_depth[0],P1Enum));
+	this->AddInput2(SurfaceCrevasseEnum,&surface_crevasse[0],P1DGEnum);
+	this->AddInput2(BasalCrevasseEnum,&basal_crevasse[0],P1DGEnum);
+	this->AddInput2(CrevasseDepthEnum,&crevasse_depth[0],P1DGEnum);
 
 	delete gauss;
@@ -442,10 +502,10 @@
 
 	/*Retrieve all inputs and parameters we will need*/
-	Input* vx_input=inputs->GetInput(VxEnum);													_assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);													_assert_(vy_input);
-	Input* bs_input = inputs->GetInput(BaseEnum);                                 _assert_(bs_input);
-	Input* strainparallel_input=inputs->GetInput(StrainRateparallelEnum);			_assert_(strainparallel_input);
-	Input* strainperpendicular_input=inputs->GetInput(StrainRateperpendicularEnum);_assert_(strainperpendicular_input);
-	Input* levermanncoeff_input=inputs->GetInput(CalvinglevermannCoeffEnum);      _assert_(levermanncoeff_input);
+	Input2* vx_input=this->GetInput2(VxEnum);													_assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);													_assert_(vy_input);
+	Input2* bs_input = this->GetInput2(BaseEnum);                                 _assert_(bs_input);
+	Input2* strainparallel_input=this->GetInput2(StrainRateparallelEnum);			_assert_(strainparallel_input);
+	Input2* strainperpendicular_input=this->GetInput2(StrainRateperpendicularEnum);_assert_(strainperpendicular_input);
+	Input2* levermanncoeff_input=this->GetInput2(CalvinglevermannCoeffEnum);      _assert_(levermanncoeff_input);
 
 	/* Start looping on the number of vertices: */
@@ -475,7 +535,7 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new TriaInput(CalvingratexEnum,&calvingratex[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(CalvingrateyEnum,&calvingratey[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(CalvingCalvingrateEnum,&calvingrate[0],P1Enum));
+	this->AddInput2(CalvingratexEnum,&calvingratex[0],P1DGEnum);
+	this->AddInput2(CalvingrateyEnum,&calvingratey[0],P1DGEnum);
+	this->AddInput2(CalvingCalvingrateEnum,&calvingrate[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -489,5 +549,5 @@
 	if(!IsIceInElement() || !IsZeroLevelset(MaskIceLevelsetEnum)){
 		IssmDouble flux_per_area=0;
-		this->inputs->AddInput(new TriaInput(CalvingFluxLevelsetEnum,&flux_per_area,P0Enum));
+		this->AddInput2(CalvingFluxLevelsetEnum,&flux_per_area,P0Enum);
 	}
 	else{
@@ -503,5 +563,5 @@
 		/*Recover parameters and values*/
 		parameters->FindParam(&domaintype,DomainTypeEnum);
-		GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+		Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 		/*Be sure that values are not zero*/
@@ -585,14 +645,14 @@
 		IssmDouble calvingratex,calvingratey,thickness,Jdet,flux_per_area;
 		IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-		Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-		Input* calvingratex_input=NULL;
-		Input* calvingratey_input=NULL;
+		Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+		Input2* calvingratex_input=NULL;
+		Input2* calvingratey_input=NULL;
 		if(domaintype==Domain2DhorizontalEnum){
-			calvingratex_input=inputs->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-			calvingratey_input=inputs->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
+			calvingratex_input=this->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+			calvingratey_input=this->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
 		}
 		else{
-			calvingratex_input=inputs->GetInput(CalvingratexAverageEnum); _assert_(calvingratex_input);
-			calvingratey_input=inputs->GetInput(CalvingrateyAverageEnum); _assert_(calvingratey_input);
+			calvingratex_input=this->GetInput2(CalvingratexAverageEnum); _assert_(calvingratex_input);
+			calvingratey_input=this->GetInput2(CalvingrateyAverageEnum); _assert_(calvingratey_input);
 		}
 
@@ -613,5 +673,5 @@
 		}
 
-		this->inputs->AddInput(new TriaInput(CalvingFluxLevelsetEnum,&flux_per_area,P0Enum));
+		this->AddInput2(CalvingFluxLevelsetEnum,&flux_per_area,P0Enum);
 
 		/*Clean up and return*/
@@ -625,5 +685,5 @@
 	if(!IsIceInElement() || !IsZeroLevelset(MaskIceLevelsetEnum)){
 		IssmDouble flux_per_area=0;
-		this->inputs->AddInput(new TriaInput(CalvingMeltingFluxLevelsetEnum,&flux_per_area,P0Enum));
+		this->AddInput2(CalvingMeltingFluxLevelsetEnum,&flux_per_area,P0Enum);
 	}
 	else{
@@ -640,5 +700,5 @@
 		/*Recover parameters and values*/
 		parameters->FindParam(&domaintype,DomainTypeEnum);
-		GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+		Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 		/*Be sure that values are not zero*/
@@ -722,20 +782,20 @@
 		IssmDouble calvingratex,calvingratey,vx,vy,vel,meltingrate,meltingratex,meltingratey,thickness,Jdet,flux_per_area;
 		IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-		Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-		Input* calvingratex_input=NULL;
-		Input* calvingratey_input=NULL;
-		Input* vx_input=NULL;
-		Input* vy_input=NULL;
-		Input* meltingrate_input=NULL;
+		Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+		Input2* calvingratex_input=NULL;
+		Input2* calvingratey_input=NULL;
+		Input2* vx_input=NULL;
+		Input2* vy_input=NULL;
+		Input2* meltingrate_input=NULL;
 		if(domaintype==Domain2DhorizontalEnum){
-			calvingratex_input=inputs->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-			calvingratey_input=inputs->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
-			vx_input=inputs->GetInput(VxEnum); _assert_(vx_input);
-			vy_input=inputs->GetInput(VyEnum); _assert_(vy_input);
-			meltingrate_input=inputs->GetInput(CalvingMeltingrateEnum); _assert_(meltingrate_input);
+			calvingratex_input=this->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+			calvingratey_input=this->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
+			vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+			vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
+			meltingrate_input=this->GetInput2(CalvingMeltingrateEnum); _assert_(meltingrate_input);
 		}
 		else{
-			calvingratex_input=inputs->GetInput(CalvingratexAverageEnum); _assert_(calvingratex_input);
-			calvingratey_input=inputs->GetInput(CalvingrateyAverageEnum); _assert_(calvingratey_input);
+			calvingratex_input=this->GetInput2(CalvingratexAverageEnum); _assert_(calvingratex_input);
+			calvingratey_input=this->GetInput2(CalvingrateyAverageEnum); _assert_(calvingratey_input);
 		}
 
@@ -762,5 +822,5 @@
 		}
 
-		this->inputs->AddInput(new TriaInput(CalvingMeltingFluxLevelsetEnum,&flux_per_area,P0Enum));
+		this->AddInput2(CalvingMeltingFluxLevelsetEnum,&flux_per_area,P0Enum);
 
 		/*Clean up and return*/
@@ -797,5 +857,5 @@
 	/*Get approximation*/
 	int approximation;
-	inputs->GetInputValue(&approximation,ApproximationEnum);
+	this->GetInput2Value(&approximation,ApproximationEnum);
 
 	/* Get node coordinates and dof list: */
@@ -804,6 +864,6 @@
 	/*Retrieve all inputs we will be needing: */
 	this->FindParam(&domaintype,DomainTypeEnum);
-	Input* vx_input=inputs->GetInput(VxEnum);             _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);             _assert_(vy_input);
+	Input2* vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
 
 	/* Start looping on the number of vertices: */
@@ -839,13 +899,13 @@
 
 	/*Add Stress tensor components into inputs*/
-	this->inputs->AddInput(new TriaInput(DeviatoricStressxxEnum,&tau_xx[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(DeviatoricStressxyEnum,&tau_xy[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(DeviatoricStressxzEnum,&tau_xz[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(DeviatoricStressyyEnum,&tau_yy[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(DeviatoricStressyzEnum,&tau_yz[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(DeviatoricStresszzEnum,&tau_zz[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(DeviatoricStresseffectiveEnum,&tau_e[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(DeviatoricStress1Enum,&tau_1[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(DeviatoricStress2Enum,&tau_2[0],P1Enum));
+	this->AddInput2(DeviatoricStressxxEnum,&tau_xx[0],P1DGEnum);
+	this->AddInput2(DeviatoricStressxyEnum,&tau_xy[0],P1DGEnum);
+	this->AddInput2(DeviatoricStressxzEnum,&tau_xz[0],P1DGEnum);
+	this->AddInput2(DeviatoricStressyyEnum,&tau_yy[0],P1DGEnum);
+	this->AddInput2(DeviatoricStressyzEnum,&tau_yz[0],P1DGEnum);
+	this->AddInput2(DeviatoricStresszzEnum,&tau_zz[0],P1DGEnum);
+	this->AddInput2(DeviatoricStresseffectiveEnum,&tau_e[0],P1DGEnum);
+	this->AddInput2(DeviatoricStress1Enum,&tau_1[0],P1DGEnum);
+	this->AddInput2(DeviatoricStress2Enum,&tau_2[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -867,6 +927,6 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* vx_input=this->GetInput(EsaXmotionEnum); _assert_(vx_input);
-	Input* vy_input=this->GetInput(EsaYmotionEnum); _assert_(vy_input);
+	Input2* vx_input=this->GetInput2(EsaXmotionEnum); _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(EsaYmotionEnum); _assert_(vy_input);
 
 	/* Start looping on the number of vertices: */
@@ -886,8 +946,8 @@
 
 	/*Add Stress tensor components into inputs*/
-	this->inputs->AddInput(new TriaInput(EsaStrainratexxEnum,&strain_xx[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(EsaStrainrateyyEnum,&strain_yy[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(EsaStrainratexyEnum,&strain_xy[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(EsaRotationrateEnum,&vorticity_xy[0],P1Enum));
+	this->AddInput2(EsaStrainratexxEnum,&strain_xx[0],P1DGEnum);
+	this->AddInput2(EsaStrainrateyyEnum,&strain_yy[0],P1DGEnum);
+	this->AddInput2(EsaStrainratexyEnum,&strain_xy[0],P1DGEnum);
+	this->AddInput2(EsaRotationrateEnum,&vorticity_xy[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -899,5 +959,5 @@
 	if(!IsOnBase()){
 		IssmDouble sigma_nn[3]={0.};
-		this->inputs->AddInput(new TriaInput(SigmaNNEnum,&sigma_nn[0],P1Enum));
+		this->AddInput2(SigmaNNEnum,&sigma_nn[0],P1Enum);
 		return;
 	}
@@ -919,7 +979,7 @@
 		this->FindParam(&domaintype,DomainTypeEnum);
 		if(domaintype==Domain2DhorizontalEnum) _error_("stress tensor calculation not supported for mesh of type " <<EnumToStringx(domaintype)<<", extrude mesh or call ComputeDeviatoricStressTensor");
-		Input* pressure_input=inputs->GetInput(PressureEnum); _assert_(pressure_input);
-		Input* vx_input=inputs->GetInput(VxEnum);             _assert_(vx_input);
-		Input* vy_input=inputs->GetInput(VyEnum);             _assert_(vy_input);
+		Input2* pressure_input=this->GetInput2(PressureEnum); _assert_(pressure_input);
+		Input2* vx_input=this->GetInput2(VxEnum);             _assert_(vx_input);
+		Input2* vy_input=this->GetInput2(VyEnum);             _assert_(vy_input);
 
 		/* Start looping on the number of vertices: */
@@ -946,5 +1006,5 @@
 
 		/*Add Stress tensor components into inputs*/
-		this->inputs->AddInput(new TriaInput(SigmaNNEnum,&sigma_nn[0],P1Enum));
+		this->AddInput2(SigmaNNEnum,&sigma_nn[0],P1Enum);
 
 		/*Clean up and return*/
@@ -975,7 +1035,7 @@
 	this->FindParam(&domaintype,DomainTypeEnum);
 	if(domaintype==Domain2DhorizontalEnum) _error_("stress tensor calculation not supported for mesh of type " <<EnumToStringx(domaintype)<<", extrude mesh or call ComputeDeviatoricStressTensor");
-	Input* pressure_input=inputs->GetInput(PressureEnum); _assert_(pressure_input);
-	Input* vx_input=inputs->GetInput(VxEnum);             _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);             _assert_(vy_input);
+	Input2* pressure_input=this->GetInput2(PressureEnum); _assert_(pressure_input);
+	Input2* vx_input=this->GetInput2(VxEnum);             _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);             _assert_(vy_input);
 
 	/* Start looping on the number of vertices: */
@@ -996,10 +1056,10 @@
 
 	/*Add Stress tensor components into inputs*/
-	this->inputs->AddInput(new TriaInput(StressTensorxxEnum,&sigma_xx[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(StressTensorxyEnum,&sigma_xy[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(StressTensorxzEnum,&sigma_xz[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(StressTensoryyEnum,&sigma_yy[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(StressTensoryzEnum,&sigma_yz[0],P1Enum));
-	this->inputs->AddInput(new TriaInput(StressTensorzzEnum,&sigma_zz[0],P1Enum));
+	this->AddInput2(StressTensorxxEnum,&sigma_xx[0],P1DGEnum);
+	this->AddInput2(StressTensorxyEnum,&sigma_xy[0],P1DGEnum);
+	this->AddInput2(StressTensorxzEnum,&sigma_xz[0],P1DGEnum);
+	this->AddInput2(StressTensoryyEnum,&sigma_yy[0],P1DGEnum);
+	this->AddInput2(StressTensoryzEnum,&sigma_yz[0],P1DGEnum);
+	this->AddInput2(StressTensorzzEnum,&sigma_zz[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -1007,5 +1067,5 @@
 }
 /*}}}*/
-void       Tria::Configure(Elements* elementsin, Loads* loadsin,Nodes* nodesin,Vertices *verticesin,Materials* materialsin, Parameters* parametersin){/*{{{*/
+void       Tria::Configure(Elements* elementsin, Loads* loadsin,Nodes* nodesin,Vertices *verticesin,Materials* materialsin, Parameters* parametersin,Inputs2* inputs2in){/*{{{*/
 
 	/*go into parameters and get the analysis_counter: */
@@ -1034,38 +1094,27 @@
 	/*point parameters to real dataset: */
 	this->parameters=parametersin;
-
-	/*get inputs configured too: */
-	this->inputs->Configure(this->parameters);
-
-}
-/*}}}*/
+	this->inputs2=inputs2in;
+}/*}}}*/
 void       Tria::ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index,int offset,int N, int M){/*{{{*/
 
-	int    idlist[NUMVERTICES];
-	int	gradidlist[NUMVERTICES];
-	IssmDouble grad_list[NUMVERTICES];
-	Input* grad_input=NULL;
-
-	Input* input=inputs->GetInput(enum_type);
-	if (!input) _error_("Input " << EnumToStringx(enum_type) << " not found");
-	if (input->ObjectEnum()!=ControlInputEnum) _error_("Input " << EnumToStringx(enum_type) << " is not a ControlInput");
+	int         idlist[NUMVERTICES];
+	int         vertexlids[NUMVERTICES];
+	int         gradidlist[NUMVERTICES];
+	IssmDouble  grad_list[NUMVERTICES];
 
 	GradientIndexing(&gradidlist[0],control_index);
-
-	for(int n=0;n<N;n++){
-		for(int i=0;i<NUMVERTICES;i++){
-			idlist[i] = offset + this->vertices[i]->Sid()+n*M;
-			grad_list[i]=gradient[idlist[i]];
-		}
-
-		ControlInput* controlinput = xDynamicCast<ControlInput*>(input);
-		if(controlinput->layout_enum!=TransientInputEnum){
-			grad_input=new TriaInput(GradientEnum,grad_list,P1Enum);
-			controlinput->SetGradient(grad_input);
-		}
-		else{
-			grad_input = new TriaInput(GradientEnum,grad_list,P1Enum);
-			controlinput->SetGradient(grad_input,n);
-			controlinput->Configure(parameters);
+	for(int i=0;i<NUMVERTICES;i++) vertexlids[i]=this->vertices[i]->lid;
+
+	if(N==1){
+		for(int i=0;i<NUMVERTICES;i++) grad_list[i]=gradient[gradidlist[i]];
+		this->inputs2->SetTriaControlInputGradient(enum_type,P1Enum,NUMVERTICES,&vertexlids[0],&grad_list[0]);
+	}
+	else{
+		for(int n=0;n<N;n++){
+			for(int i=0;i<NUMVERTICES;i++){
+				idlist[i] = offset + this->vertices[i]->Sid()+n*M;
+				grad_list[i]=gradient[idlist[i]];
+			}
+			this->inputs2->SetTriaControlInputGradient(enum_type,P1Enum,NUMVERTICES,&vertexlids[0],&grad_list[0],n);
 		}
 	}
@@ -1074,26 +1122,19 @@
 void       Tria::ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index){/*{{{*/
 
-	int    vertexpidlist[NUMVERTICES];
+	int        idlist[NUMVERTICES];
+	int        vertexlids[NUMVERTICES];
 	IssmDouble grad_list[NUMVERTICES];
-	Input* grad_input=NULL;
-
-	Input* input=inputs->GetInput(enum_type);
-	if (!input) _error_("Input " << EnumToStringx(enum_type) << " not found");
-	if (input->ObjectEnum()!=ControlInputEnum) _error_("Input " << EnumToStringx(enum_type) << " is not a ControlInput");
-
-	GradientIndexing(&vertexpidlist[0],control_index);
-	for(int i=0;i<NUMVERTICES;i++) grad_list[i]=gradient[vertexpidlist[i]];
-	grad_input=new TriaInput(GradientEnum,grad_list,P1Enum);
-
-	((ControlInput*)input)->SetGradient(grad_input);
+
+	GradientIndexing(&idlist[0],control_index);
+	for(int i=0;i<NUMVERTICES;i++) grad_list[i]=gradient[idlist[i]];
+	for(int i=0;i<NUMVERTICES;i++) vertexlids[i]=this->vertices[i]->lid;
+
+	this->inputs2->SetTriaControlInputGradient(enum_type,P1Enum,NUMVERTICES,&vertexlids[0],&grad_list[0]);
 
 }/*}}}*/
 void       Tria::ControlToVectors(Vector<IssmPDouble>* vector_control, Vector<IssmPDouble>* vector_gradient,int control_enum){/*{{{*/
 
-	Input* input=inputs->GetInput(control_enum);
-	if (!input) _error_("Input " << EnumToStringx(control_enum) << " not found");
-	if (input->ObjectEnum()!=ControlInputEnum) _error_("Input " << EnumToStringx(control_enum) << " is not a ControlInput");
-
 	int         sidlist[NUMVERTICES];
+	int         lidlist[NUMVERTICES];
 	int         connectivity[NUMVERTICES];
 	IssmPDouble values[NUMVERTICES];
@@ -1103,4 +1144,10 @@
 	this->GetVerticesConnectivityList(&connectivity[0]);
 	this->GetVerticesSidList(&sidlist[0]);
+	this->GetVerticesLidList(&lidlist[0]);
+
+	ElementInput2* control_value    = this->inputs2->GetControlInput2Data(control_enum,"value");    _assert_(control_value);
+	ElementInput2* control_gradient = this->inputs2->GetControlInput2Data(control_enum,"gradient"); _assert_(control_gradient);
+	control_value->Serve(NUMVERTICES,&lidlist[0]);
+	control_gradient->Serve(NUMVERTICES,&lidlist[0]);
 
 	GaussTria* gauss=new GaussTria();
@@ -1108,6 +1155,6 @@
 		gauss->GaussVertex(iv);
 
-		((ControlInput*)input)->GetInputValue(&value,gauss);
-		((ControlInput*)input)->GetGradientValue(&gradient,gauss);
+		control_value->GetInputValue(&value,gauss);
+		control_gradient->GetInputValue(&gradient,gauss);
 
 		values[iv]    = reCast<IssmPDouble>(value)/reCast<IssmPDouble>(connectivity[iv]);
@@ -1124,5 +1171,5 @@
 	/*Get current field and vertex coordinates*/
 	IssmDouble ls[NUMVERTICES],distance;
-	GetInputListOnVertices(&ls[0],distanceenum);
+	Element::GetInputListOnVertices(&ls[0],distanceenum);
 
 	/*Get distance from list of segments and reset ls*/
@@ -1143,5 +1190,5 @@
 
 	/*Update Levelset*/
-	this->inputs->AddInput(new TriaInput(distanceenum,&ls[0],P1Enum));
+	this->AddInput2(distanceenum,&ls[0],P1Enum);
 }
 /*}}}*/
@@ -1152,5 +1199,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
 
 	for(int i=0;i<3;i++){
@@ -1170,5 +1217,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
 
 	for(int i=0;i<3;i++){
@@ -1190,5 +1237,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
 
 	for(int i=0;i<3;i++){
@@ -1208,5 +1255,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
 
 	for(int i=0;i<3;i++){
@@ -1233,7 +1280,5 @@
 			/*Get input:*/
 			IssmDouble vel;
-			Input* vel_input;
-
-			vel_input=this->inputs->GetInput(VelEnum); _assert_(vel_input);
+			Input2* vel_input=this->GetInput2(VelEnum); _assert_(vel_input);
 			vel_input->GetInputAverage(&vel);
 
@@ -1291,5 +1336,5 @@
 	floatingarea=(1-phi)*this->GetArea();
 	if(scaled==true){
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);
 		floatingarea=floatingarea*scalefactor;
@@ -1306,5 +1351,5 @@
 
 	int approximation;
-	inputs->GetInputValue(&approximation,ApproximationEnum);
+	this->GetInput2Value(&approximation,ApproximationEnum);
 
 	if(approximation==HOApproximationEnum || approximation==SSAApproximationEnum || approximation==SSAHOApproximationEnum){
@@ -1317,9 +1362,9 @@
 	IssmDouble  sigmaxx[NUMVERTICES],sigmayy[NUMVERTICES],sigmaxy[NUMVERTICES],sigma_nn[NUMVERTICES];
 	IssmDouble  viscosity,epsilon[NUMVERTICES];
-	GetInputListOnVertices(&base[0],BaseEnum);
-	GetInputListOnVertices(&bed[0],BedEnum);
-	GetInputListOnVertices(&surface[0],SurfaceEnum);
-	GetInputListOnVertices(&pressure[0],PressureEnum);
-	GetInputListOnVertices(&phi[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&base[0],BaseEnum);
+	Element::GetInputListOnVertices(&bed[0],BedEnum);
+	Element::GetInputListOnVertices(&surface[0],SurfaceEnum);
+	Element::GetInputListOnVertices(&pressure[0],PressureEnum);
+	Element::GetInputListOnVertices(&phi[0],MaskGroundediceLevelsetEnum);
 	IssmDouble rho_ice   = FindParam(MaterialsRhoIceEnum);
 	IssmDouble rho_water = FindParam(MaterialsRhoSeawaterEnum);
@@ -1329,6 +1374,6 @@
 	GetVerticesCoordinates(&xyz_list);
 	/*Retrieve all inputs we will be needing: */
-	Input* vx_input       = inputs->GetInput(VxEnum);       _assert_(vx_input);
-	Input* vy_input       = inputs->GetInput(VyEnum);       _assert_(vy_input);
+	Input2* vx_input       = this->GetInput2(VxEnum);       _assert_(vx_input);
+	Input2* vy_input       = this->GetInput2(VyEnum);       _assert_(vy_input);
 
 	/*1. Recover stresses at the base*/
@@ -1527,5 +1572,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -1583,5 +1628,5 @@
 	/*Recover parameters and values*/
 	parameters->FindParam(&domaintype,DomainTypeEnum);
-	GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -1709,8 +1754,8 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&bed[0],BedEnum);
-	GetInputListOnVertices(&surfaces[0],SurfaceEnum);
-	GetInputListOnVertices(&bases[0],BaseEnum);
-	GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&bed[0],BedEnum);
+	Element::GetInputListOnVertices(&surfaces[0],SurfaceEnum);
+	Element::GetInputListOnVertices(&bases[0],BaseEnum);
+	Element::GetInputListOnVertices(&lsf[0],MaskIceLevelsetEnum);
 
 	nrfrontbed=0;
@@ -1803,5 +1848,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&levelset[0],levelsetenum);
+	Element::GetInputListOnVertices(&levelset[0],levelsetenum);
 
 	/* Get nodes where there is no ice */
@@ -1832,7 +1877,399 @@
 	*pxyz_front=xyz_front;
 }/*}}}*/
+Input2*    Tria::GetInput2(int inputenum){/*{{{*/
+
+	/*Get Input from dataset*/
+	if(this->iscollapsed){
+		PentaInput2* input = this->inputs2->GetPentaInput(inputenum);
+		if(!input) return input;
+
+		this->InputServe(input);
+		return input;
+	}
+	else{
+		TriaInput2* input = this->inputs2->GetTriaInput(inputenum);
+		if(!input) return input;
+
+		this->InputServe(input);
+		return input;
+	}
+}/*}}}*/
+Input2*    Tria::GetInput2(int inputenum,IssmDouble time){/*{{{*/
+
+	/*Get Input from dataset*/
+	if(this->iscollapsed){
+		PentaInput2* input = this->inputs2->GetPentaInput(inputenum,time);
+		if(!input) return input;
+
+		this->InputServe(input);
+		return input;
+	}
+	else{
+		TriaInput2* input = this->inputs2->GetTriaInput(inputenum,time);
+		if(!input) return input;
+
+		this->InputServe(input);
+		return input;
+	}
+}/*}}}*/
+Input2*    Tria::GetInput2(int inputenum,IssmDouble start_time, IssmDouble end_time){/*{{{*/
+
+	/*Get Input from dataset*/
+	if(this->iscollapsed){
+		_error_("Get Average input not implemented in Penta yet");
+	}
+	else{
+		TriaInput2* input = this->inputs2->GetTriaInput(inputenum,start_time, end_time);
+		if(!input) return input;
+
+		this->InputServe(input);
+		return input;
+	}
+}/*}}}*/
+void       Tria::GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value){/*{{{*/
+
+	/*Checks in debugging mode*/
+	_assert_(pvalue);
+
+	/* Start looping on the number of vertices: */
+	if(input){
+		GaussTria gauss;
+		for(int iv=0;iv<NUMVERTICES;iv++){
+			gauss.GaussVertex(iv);
+			input->GetInputValue(&pvalue[iv],&gauss);
+		}
+	}
+	else{
+		for(int iv=0;iv<NUMVERTICES;iv++) pvalue[iv] = default_value;
+	}
+}
+/*}}}*/
+void       Tria::GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value){/*{{{*/
+
+	/*Checks in debugging mode*/
+	_assert_(pvalue);
+
+	/*What type of finite element are we dealing with?*/
+	int fe       = this->FiniteElement();
+	int numnodes = this->GetNumberOfNodes();
+
+	/* Start looping on the number of vertices: */
+	if(input){
+		GaussTria gauss;
+		for(int iv=0;iv<numnodes;iv++){
+			gauss.GaussNode(fe,iv);
+			input->GetInputValue(&pvalue[iv],&gauss);
+		}
+	}
+	else{
+		for(int iv=0;iv<numnodes;iv++) pvalue[iv] = default_value;
+	}
+}
+/*}}}*/
+void       Tria::InputServe(Input2* input_in){/*{{{*/
+
+	/*Return NULL pointer if input is NULL*/
+	if(!input_in) return;
+
+	/*Get Input from dataset*/
+	if(this->iscollapsed){
+		_assert_(input_in->ObjectEnum()==PentaInput2Enum);
+		PentaInput2* input = xDynamicCast<PentaInput2*>(input_in);
+
+		/*Intermediaries*/
+		int numindices;
+		int indices[3];
+
+		/*Check interpolation*/
+		int interpolation = input->GetInterpolation();
+		switch(interpolation){
+			case P0Enum:
+				numindices = 1;
+				indices[0] = this->lid;
+				input->Serve(numindices,&indices[0]);
+				break;
+			case P1Enum:
+				numindices = 3;
+				for(int i=0;i<3;i++) indices[i] = vertices[i]->lid;
+				input->Serve(numindices,&indices[0]);
+				break;
+			case P1DGEnum:
+			case P1bubbleEnum:
+				input->ServeCollapsed(this->lid,this->iscollapsed);
+				break;
+			default: _error_("interpolation "<<EnumToStringx(interpolation)<<" not supported");
+		}
+
+		/*Flag as collapsed for later use*/
+		input->SetServeCollapsed(true);
+		return;
+	}
+	else{
+		_assert_(input_in->ObjectEnum()==TriaInput2Enum);
+		TriaInput2* input = xDynamicCast<TriaInput2*>(input_in);
+
+		/*Intermediaries*/
+		int numindices;
+		int indices[7];
+
+		/*Check interpolation*/
+		int interpolation = input->GetInterpolation();
+		switch(interpolation){
+			case P0Enum:
+				numindices = 1;
+				indices[0] = this->lid;
+				input->Serve(numindices,&indices[0]);
+				break;
+			case P1Enum:
+				numindices = 3;
+				for(int i=0;i<3;i++) indices[i] = vertices[i]->lid;
+				input->Serve(numindices,&indices[0]);
+				break;
+			case P1DGEnum:
+				numindices = 3;
+				input->Serve(this->lid,numindices);
+				break;
+			default:
+				input->Serve(this->lid,this->GetNumberOfNodes(interpolation));
+				break;
+		}
+		return;
+	}
+}/*}}}*/
+DatasetInput2* Tria::GetDatasetInput2(int inputenum){/*{{{*/
+
+	DatasetInput2* datasetinput = this->inputs2->GetDatasetInput2(inputenum);
+	if(!datasetinput) return NULL;
+
+	for(int i=0;i<datasetinput->GetNumIds();i++){
+
+		/*Get Input from dataset*/
+		if(this->iscollapsed){
+
+			PentaInput2* input = datasetinput->GetPentaInputByOffset(i); _assert_(input);
+
+			/*Intermediaries*/
+			int numindices;
+			int indices[3];
+
+			/*Check interpolation*/
+			int interpolation = input->GetInterpolation();
+			switch(interpolation){
+				case P0Enum:
+					numindices = 1;
+					indices[0] = this->lid;
+					input->Serve(numindices,&indices[0]);
+					break;
+				case P1Enum:
+					numindices = 3;
+					for(int i=0;i<3;i++) indices[i] = vertices[i]->lid;
+					input->Serve(numindices,&indices[0]);
+					break;
+				case P1DGEnum:
+				case P1bubbleEnum:
+					input->ServeCollapsed(this->lid,this->iscollapsed);
+					break;
+				default: _error_("interpolation "<<EnumToStringx(interpolation)<<" not supported");
+			}
+
+			/*Flag as collapsed for later use*/
+			input->SetServeCollapsed(true);
+		}
+		else{
+
+			TriaInput2* input = datasetinput->GetTriaInputByOffset(i); _assert_(input);
+
+			/*Intermediaries*/
+			int numindices;
+			int indices[7];
+
+			/*Check interpolation*/
+			int interpolation = input->GetInterpolation();
+			switch(interpolation){
+				case P0Enum:
+					numindices = 1;
+					indices[0] = this->lid;
+					input->Serve(numindices,&indices[0]);
+					break;
+				case P1Enum:
+					numindices = 3;
+					for(int i=0;i<3;i++) indices[i] = vertices[i]->lid;
+					input->Serve(numindices,&indices[0]);
+					break;
+				case P1DGEnum:
+					numindices = 3;
+					input->Serve(this->lid,numindices);
+					break;
+				default: _error_("interpolation "<<EnumToStringx(interpolation)<<" not supported");
+			}
+
+		}
+	}
+
+	return datasetinput;
+}/*}}}*/
+void       Tria::CreateInputTimeAverage(int transientinput_enum,int averagedinput_enum,IssmDouble start_time,IssmDouble end_time){/*{{{*/
+
+	_assert_(end_time>start_time);
+
+	/*Intermediaries*/
+	IssmDouble averaged_values[NUMVERTICES];
+	IssmDouble current_values[NUMVERTICES];
+	IssmDouble dt;
+	int        found,start_offset,end_offset;
+	int        averaging_method=0;
+
+
+	/*Get transient input time steps*/
+	int         numtimesteps;
+	IssmDouble *timesteps    = NULL;
+	TransientInput2* transient_input  = this->inputs2->GetTransientInput(transientinput_enum);
+	transient_input->GetAllTimes(&timesteps,&numtimesteps);
+
+	/*go through the timesteps, and grab offset for start and end*/
+	found=binary_search(&start_offset,start_time,timesteps,numtimesteps);
+	if(!found) _error_("Input not found (is TransientInput sorted ?)");
+	found=binary_search(&end_offset,end_time,timesteps,numtimesteps);
+	if(!found) _error_("Input not found (is TransientInput sorted ?)");
+
+	Gauss* gauss=this->NewGauss();
+
+	/*stack the input for each timestep in the slice*/
+	int offset = start_offset;
+	while(offset < end_offset ){
+		if(offset==-1){
+			/*get values for the first time: */
+			_assert_(start_time<timesteps[0]);
+			TriaInput2* input = transient_input->GetTriaInput(0);
+			_assert_(input->GetInterpolation()==P1Enum);
+			this->InputServe(input);
+			for(int iv=0;iv<NUMVERTICES;iv++){
+				gauss->GaussVertex(iv);
+				input->GetInputValue(&current_values[iv],gauss);
+			}
+			dt = timesteps[0] - start_time; _assert_(dt>0.);
+		}
+		else{
+			TriaInput2* input = transient_input->GetTriaInput(offset+1);
+			_assert_(input->GetInterpolation()==P1Enum);
+			this->InputServe(input);
+			for(int iv=0;iv<NUMVERTICES;iv++){
+				gauss->GaussVertex(iv);
+				input->GetInputValue(&current_values[iv],gauss);
+			}
+			if(offset == numtimesteps-1){
+				dt = end_time - timesteps[offset]; _assert_(dt>0.);
+			}
+			else{
+				dt = timesteps[offset+1] - timesteps[offset]; _assert_(dt>0.);
+			}
+		}
+
+		switch(averaging_method){
+			case 0: /*Arithmetic mean*/
+				if(offset==start_offset){
+					for(int iv=0;iv<NUMVERTICES;iv++) averaged_values[iv]  = dt*current_values[iv];
+				}
+				else{
+					for(int iv=0;iv<NUMVERTICES;iv++) averaged_values[iv] += dt*current_values[iv];
+				}
+				break;
+			case 1: /*Geometric mean*/
+				if(offset==start_offset){
+					for(int iv=0;iv<NUMVERTICES;iv++) averaged_values[iv]  = dt*current_values[iv];
+				}
+				else{
+					for(int iv=0;iv<NUMVERTICES;iv++) averaged_values[iv] *= dt*current_values[iv];
+				}
+				break;
+			case 2: /*Harmonic mean*/
+				if(offset==start_offset){
+					for(int iv=0;iv<NUMVERTICES;iv++){
+						_assert_(current_values[iv]>1.e-50);
+						averaged_values[iv]  = dt*1./current_values[iv];
+					}
+				}
+				else{
+					for(int iv=0;iv<NUMVERTICES;iv++){
+						_assert_(current_values[iv]>1.e-50);
+						averaged_values[iv]  += dt*1./current_values[iv];
+					}
+				}
+				break;
+			default:
+				_error_("averaging method is not recognised");
+		}
+
+		offset+=1;
+	}
+
+	/*Integration done, now normalize*/
+	switch(averaging_method){
+		case 0: //Arithmetic mean
+			for(int iv=0;iv<NUMVERTICES;iv++) averaged_values[iv] =  averaged_values[iv]/(end_time - start_time);
+			break;
+		case 1: /*Geometric mean*/
+			for(int iv=0;iv<NUMVERTICES;iv++) averaged_values[iv] = pow(averaged_values[iv], 1./(end_time - start_time));
+			break;
+		case 2: /*Harmonic mean*/
+			for(int iv=0;iv<NUMVERTICES;iv++) averaged_values[iv] = 1./(averaged_values[iv]/(end_time - start_time));
+			break;
+		default:
+			_error_("averaging method is not recognised");
+	}
+
+	this->AddInput2(averagedinput_enum,&averaged_values[0],P1Enum);
+
+	/*Cleanup*/
+	delete gauss;
+	xDelete<IssmDouble>(timesteps);
+}
+/*}}}*/
+void       Tria::GetInputAveragesUpToCurrentTime(int input_enum,IssmDouble** pvalues, IssmDouble** ptimes, int* pnumtimes, IssmDouble currenttime){/*{{{*/
+
+	/*Get transient input time steps*/
+	int         numtimesteps;
+	IssmDouble *timesteps    = NULL;
+	TransientInput2* transient_input  = this->inputs2->GetTransientInput(input_enum);
+
+	transient_input->GetAllTimes(&timesteps,&numtimesteps);
+
+	/*Figure out how many time steps we are going to return: */
+	int  numsteps               = 0;
+	bool iscurrenttime_included = false;
+	for(int i=0;i<numtimesteps;i++){
+		if(timesteps[i]==currenttime) iscurrenttime_included=true;
+		if(timesteps[i]>currenttime)  break;
+		else numsteps++;
+	}
+	if(iscurrenttime_included==false)numsteps++;
+
+	/*allocate: */
+	IssmDouble* times=xNew<IssmDouble>(numsteps);
+	IssmDouble* values=xNew<IssmDouble>(numsteps);
+
+	for(int i=0;i<numsteps;i++){
+		if((iscurrenttime_included==false) && (i==(numsteps-1))){
+			Input2* input = this->GetInput2(input_enum,currenttime);
+			input->GetInputAverage(&values[i]);
+			times[i]=currenttime;
+		}
+		else{
+			TriaInput2* input = transient_input->GetTriaInput(i);
+			this->InputServe(input);
+			input->GetInputAverage(&values[i]);
+			times[i]=timesteps[i];
+		}
+	}
+
+	/*Assign output pointers*/
+	*pvalues=values;
+	*ptimes=times;
+	*pnumtimes=numsteps;
+}
+/*}}}*/
 void       Tria::GetInputValue(IssmDouble* pvalue,Node* node,int enumtype){/*{{{*/
 
-	Input* input=inputs->GetInput(enumtype);
+	Input2* input=this->GetInput2(enumtype);
 	if(!input) _error_("No input of type " << EnumToStringx(enumtype) << " found in tria");
 
@@ -1848,5 +2285,5 @@
 void       Tria::GetInputValue(IssmDouble* pvalue,Vertex* vertex,int enumtype){/*{{{*/
 
-	Input* input=inputs->GetInput(enumtype);
+	Input2* input=this->GetInput2(enumtype);
 	if(!input) _error_("No input of type " << EnumToStringx(enumtype) << " found in tria");
 
@@ -1868,5 +2305,5 @@
 
 	/*Recover parameters and values*/
-	GetInputListOnVertices(&levelset[0],levelsetenum);
+	Element::GetInputListOnVertices(&levelset[0],levelsetenum);
 
 	/* Get nodes where there is no ice */
@@ -1913,5 +2350,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&lsf[0],levelset_enum);
+	Element::GetInputListOnVertices(&lsf[0],levelset_enum);
 
 	/* Determine distribution of ice over element.
@@ -2052,70 +2489,69 @@
 void       Tria::GetVectorFromControlInputs(Vector<IssmDouble>* vector,int control_enum,int control_index,const char* data){/*{{{*/
 
-	int vertexidlist[NUMVERTICES];
-	Input *input=NULL;
-
 	/*Get out if this is not an element input*/
 	if(!IsInputEnum(control_enum)) _error_("Enum "<<EnumToStringx(control_enum)<<" is not in IsInput");
 
 	/*Prepare index list*/
-	GradientIndexing(&vertexidlist[0],control_index);
+	int idlist[NUMVERTICES];
+	GradientIndexing(&idlist[0],control_index);
 
 	/*Get input (either in element or material)*/
-	input=(Input*)this->inputs->GetInput(control_enum);   _assert_(input);
-
-	/*Check that it is a ControlInput*/
-	if (input->ObjectEnum()!=ControlInputEnum){
-		_error_("input " << EnumToStringx(control_enum) << " is not a ControlInput");
-	}
-
-	((ControlInput*)input)->GetVectorFromInputs(vector,&vertexidlist[0],data);
+	ElementInput2* input=this->inputs2->GetControlInput2Data(control_enum,data);   _assert_(input);
+
+	/*Intermediaries*/
+	int numindices;
+	int indices[NUMVERTICES];
+
+	/*Check interpolation*/
+	int interpolation = input->GetInterpolation();
+	switch(interpolation){
+		case P1Enum:
+			numindices = NUMVERTICES;
+			for(int i=0;i<NUMVERTICES;i++) indices[i] = vertices[i]->lid;
+			input->Serve(numindices,&indices[0]);
+			break;
+		default: _error_("interpolation "<<EnumToStringx(interpolation)<<" not supported");
+	}
+
+	/*Flag as collapsed for later use*/
+	if(this->iscollapsed){
+		xDynamicCast<PentaInput2*>(input)->SetServeCollapsed(true);
+	}
+
+	/* Start looping on the number of vertices: */
+	IssmDouble values[NUMVERTICES];
+	Gauss*gauss=this->NewGauss();
+	for(int iv=0;iv<NUMVERTICES;iv++){
+		gauss->GaussVertex(iv);
+		input->GetInputValue(&values[iv],gauss);
+	}
+	delete gauss;
+
+	vector->SetValues(NUMVERTICES,idlist,&values[0],INS_VAL);
 }
 /*}}}*/
 void       Tria::GetVectorFromControlInputs(Vector<IssmDouble>* vector,int control_enum,int control_index,const char* data,int offset){/*{{{*/
 
-	int* idlist = NULL;
-	IssmDouble* values = NULL;
-	int* M = NULL;
-
-	/*Get out if this is not an element input*/
-	if(!IsInputEnum(control_enum)) _error_("Enum "<<EnumToStringx(control_enum)<<" is not in IsInput");
-	Input* input=(Input*)this->inputs->GetInput(control_enum);   _assert_(input);
-
-	parameters->FindParam(&M,NULL,ControlInputSizeMEnum);
-
-	/*Cast to Controlinput*/
-	if(input->ObjectEnum()!=ControlInputEnum) _error_("input " << EnumToStringx(control_enum) << " is not a ControlInput");
-	ControlInput* controlinput = xDynamicCast<ControlInput*>(input);
-
-	if(strcmp(data,"value")==0){
-		input  = controlinput->values;
-	}
-	else if (strcmp(data,"lowerbound")==0){
-		input = controlinput->minvalues;
-	}
-	else if (strcmp(data,"upperbound")==0){
-		input = controlinput->maxvalues;
-	}
-	else if (strcmp(data,"gradient")==0){
-		input = controlinput->gradient;
-	}
-	else{
-		_error_("Data " << data << " not supported yet");
-	}
+	/*Get input*/
+	ElementInput2* input=this->inputs2->GetControlInput2Data(control_enum,data);   _assert_(input);
+
+	/*Build the vertex lid list once; reused for all Serve calls below*/
+	int lidlist[NUMVERTICES];
+	for(int i=0;i<NUMVERTICES;i++) lidlist[i] = vertices[i]->lid;
+
 	/*Check what input we are dealing with*/
-
 	switch(input->ObjectEnum()){
-		case TriaInputEnum:
+		case TriaInput2Enum:
 			  {
-				TriaInput* triainput = xDynamicCast<TriaInput*>(input);
-				if(triainput->interpolation_type!=P1Enum) _error_("not supported yet");
+				IssmDouble values[NUMVERTICES];
+				int        idlist[NUMVERTICES];
+
+				TriaInput2* triainput = xDynamicCast<TriaInput2*>(input);
+				if(triainput->GetInputInterpolationType()!=P1Enum) _error_("not supported yet");
+				input->Serve(NUMVERTICES,&lidlist[0]);
 
 				/*Create list of indices and values for global vector*/
-				idlist = xNew<int>(NUMVERTICES);
-				values = xNew<IssmDouble>(NUMVERTICES);
 				GradientIndexing(&idlist[0],control_index);
-				for(int i=0;i<NUMVERTICES;i++){
-					values[i] = triainput->values[i];
-				}
+				for(int i=0;i<NUMVERTICES;i++) values[i] = triainput->element_values[i];
 				vector->SetValues(NUMVERTICES,idlist,values,INS_VAL);
 				break;
@@ -2124,26 +2560,30 @@
 		case TransientInputEnum:
 				{
-					TransientInput* transientinput = xDynamicCast<TransientInput*>(input);
+					TransientInput2* transientinput = xDynamicCast<TransientInput2*>(input);
 					int N = transientinput->numtimesteps;
-					idlist = xNew<int>(NUMVERTICES*N);
-					values = xNew<IssmDouble>(NUMVERTICES*N);
+					int* M=NULL;
+					parameters->FindParam(&M,NULL,ControlInputSizeMEnum);
+					int* idlist = xNew<int>(NUMVERTICES*N);
+					IssmDouble* values = xNew<IssmDouble>(NUMVERTICES*N);
 					for(int t=0;t<transientinput->numtimesteps;t++) {
 						IssmDouble time = transientinput->GetTimeByOffset(t);
-						input = transientinput->GetTimeInput(time);
-						TriaInput* timeinput = xDynamicCast<TriaInput*>(input);
-						if(timeinput->interpolation_type!=P1Enum) _error_("not supported yet");
-						/*Create list of indices and values for global vector*/
-						for(int i=0;i<NUMVERTICES;i++){
-								idlist[N*i+t] = offset + this->vertices[i]->Sid()+t*M[control_index];
-								values[N*i+t] = timeinput->values[i];
-						}
+						_error_("not implemented");
+						//TriaInput* timeinput = xDynamicCast<TriaInput*>(transientinput->GetTimeInput(time));
+						//if(timeinput->interpolation_type!=P1Enum) _error_("not supported yet");
+						//input->Serve(NUMVERTICES,&lidlist[0]);
+						///*Create list of indices and values for global vector*/
+						//for(int i=0;i<NUMVERTICES;i++){
+						//		idlist[N*i+t] = offset + this->vertices[i]->Sid()+t*M[control_index];
+						//		values[N*i+t] = timeinput->values[i];
+						//}
 					}
 					vector->SetValues(NUMVERTICES*transientinput->numtimesteps,idlist,values,INS_VAL);
+					xDelete<int>(M);
+					xDelete<int>(idlist);
+					xDelete<IssmDouble>(values);
 					break;
 				}
-		default: _error_("input "<<input->ObjectEnum()<<" not supported yet");
-	}
-	xDelete<int>(idlist);
-	xDelete<IssmDouble>(values);
+		default: _error_("input "<<EnumToStringx(input->ObjectEnum())<<" not supported yet");
+	}
 }
 /*}}}*/
@@ -2199,5 +2639,5 @@
 	groundedarea=phi*this->GetArea();
 	if(scaled==true){
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);
 		groundedarea=groundedarea*scalefactor;
@@ -2215,5 +2655,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonbaseEnum);
 	sum = values[0]+values[1]+values[2];
 
@@ -2236,5 +2676,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonsurfaceEnum);
 	sum = values[0]+values[1]+values[2];
 
@@ -2279,14 +2719,14 @@
 	IssmDouble vx,vy,thickness,Jdet;
 	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* vx_input=NULL;
-	Input* vy_input=NULL;
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* vx_input=NULL;
+	Input2* vy_input=NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		vx_input=inputs->GetInput(VxEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyEnum); _assert_(vy_input);
+		vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
 	}
 	else{
-		vx_input=inputs->GetInput(VxAverageEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyAverageEnum); _assert_(vy_input);
+		vx_input=this->GetInput2(VxAverageEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyAverageEnum); _assert_(vy_input);
 	}
 
@@ -2327,5 +2767,5 @@
 	/*Recover parameters and values*/
 	parameters->FindParam(&domaintype,DomainTypeEnum);
-	GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -2408,14 +2848,14 @@
 	IssmDouble vx,vy,thickness,Jdet;
 	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* vx_input=NULL;
-	Input* vy_input=NULL;
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* vx_input=NULL;
+	Input2* vy_input=NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		vx_input=inputs->GetInput(VxEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyEnum); _assert_(vy_input);
+		vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
 	}
 	else{
-		vx_input=inputs->GetInput(VxAverageEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyAverageEnum); _assert_(vy_input);
+		vx_input=this->GetInput2(VxAverageEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyAverageEnum); _assert_(vy_input);
 	}
 
@@ -2438,5 +2878,5 @@
 IssmDouble Tria::GroundinglineMassFlux(bool scaled){/*{{{*/
 
-	/*Make sure there is an ice front here*/
+	/*Make sure there is a grounding line here*/
 	if(!IsIceInElement()) return 0;
 	if(!IsZeroLevelset(MaskGroundediceLevelsetEnum)) return 0;
@@ -2456,5 +2896,5 @@
 	/*Recover parameters and values*/
 	parameters->FindParam(&domaintype,DomainTypeEnum);
-	GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -2535,14 +2975,14 @@
 	IssmDouble vx,vy,thickness,Jdet;
 	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* vx_input=NULL;
-	Input* vy_input=NULL;
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* vx_input=NULL;
+	Input2* vy_input=NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		vx_input=inputs->GetInput(VxEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyEnum); _assert_(vy_input);
+		vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
 	}
 	else{
-		vx_input=inputs->GetInput(VxAverageEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyAverageEnum); _assert_(vy_input);
+		vx_input=this->GetInput2(VxAverageEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyAverageEnum); _assert_(vy_input);
 	}
 
@@ -2578,5 +3018,5 @@
 
 	if(!IsIceInElement())return 0.;
-	//if(!(this->inputs->Max(MaskIceLevelsetEnum)<0)) return 0;
+	//if(!IsIceOnlyInElement()) return 0;
 
 	int domaintype;
@@ -2592,5 +3032,5 @@
 		area_base=this->GetAreaIce();
 		if(scaled==true){
-			GetInputListOnVertices(&scalefactors[0],MeshScaleFactorEnum);
+			Element::GetInputListOnVertices(&scalefactors[0],MeshScaleFactorEnum);
 			for(i=0;i<NUMVERTICES;i++) SFaux[i]= scalefactors[indices[i]]; //sort thicknesses in ice/noice
 			switch(numiceverts){
@@ -2615,6 +3055,6 @@
 			area_base=area_base*scalefactor;
 		}
-		GetInputListOnVertices(&surfaces[0],SurfaceEnum);
-		GetInputListOnVertices(&bases[0],BaseEnum);
+		Element::GetInputListOnVertices(&surfaces[0],SurfaceEnum);
+		Element::GetInputListOnVertices(&bases[0],BaseEnum);
 		for(i=0;i<NUMVERTICES;i++) Haux[i]= surfaces[indices[i]]-bases[indices[i]]; //sort thicknesses in ice/noice
 		switch(numiceverts){
@@ -2642,5 +3082,5 @@
 		area_base=this->GetArea();
 		if(scaled==true){
-			Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+			Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 			scalefactor_input->GetInputAverage(&scalefactor);
 			area_base=area_base*scalefactor;
@@ -2648,6 +3088,6 @@
 
 		/*Now get the average height*/
-		Input* surface_input = inputs->GetInput(SurfaceEnum); _assert_(surface_input);
-		Input* base_input     = inputs->GetInput(BaseEnum);     _assert_(base_input);
+		Input2* surface_input = this->GetInput2(SurfaceEnum); _assert_(surface_input);
+		Input2* base_input    = this->GetInput2(BaseEnum);    _assert_(base_input);
 		surface_input->GetInputAverage(&surface);
 		base_input->GetInputAverage(&base);
@@ -2686,5 +3126,5 @@
 	base = 1./2. * fabs((xyz_list[0][0]-xyz_list[2][0])*(xyz_list[1][1]-xyz_list[0][1]) - (xyz_list[0][0]-xyz_list[1][0])*(xyz_list[2][1]-xyz_list[0][1]));
 	if(scaled==true){
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);
 		base=base*scalefactor;
@@ -2692,7 +3132,7 @@
 
 	/*Now get the average height and bathymetry*/
-	Input* surface_input    = inputs->GetInput(SurfaceEnum);    _assert_(surface_input);
-	Input* base_input        = inputs->GetInput(BaseEnum);        _assert_(base_input);
-	Input* bed_input = inputs->GetInput(BedEnum); _assert_(bed_input);
+	Input2* surface_input = this->GetInput2(SurfaceEnum); _assert_(surface_input);
+	Input2* base_input    = this->GetInput2(BaseEnum);    _assert_(base_input);
+	Input2* bed_input     = this->GetInput2(BedEnum);     _assert_(bed_input);
 	if(!bed_input) _error_("Could not find bed");
 	surface_input->GetInputAverage(&surface);
@@ -2707,11 +3147,11 @@
 
 	/*New input*/
-	Input* oldinput=NULL;
-	Input* newinput=NULL;
+	Input2* oldinput=NULL;
+	Input2* newinput=NULL;
 
 	/*copy input of enum_type*/
-	oldinput=(Input*)this->inputs->GetInput(enum_type);
+	oldinput=this->GetInput2(enum_type);
 	if(!oldinput)_error_("could not find old input with enum: " << EnumToStringx(enum_type));
-	newinput=(Input*)oldinput->copy();
+	newinput=oldinput->copy();
 
 	/*Assign new name (average)*/
@@ -2719,5 +3159,5 @@
 
 	/*Add new input to current element*/
-	this->inputs->AddInput((Input*)newinput);
+	_error_("not implemented");
 }
 /*}}}*/
@@ -2748,128 +3188,4 @@
 		tria_vertex_ids[i]=reCast<int>(iomodel->elements[3*index+i]); //ids for vertices are in the elements array from Matlab
 	}
-
-	/*Need to know the type of approximation for this element*/
-	if(iomodel->Data("md.flowequation.element_equation")){
-		this->inputs->AddInput(new IntInput(ApproximationEnum,IoCodeToEnumElementEquation(reCast<int>(iomodel->Data("md.flowequation.element_equation")[index]))));
-	}
-
-	/*Control Inputs*/
-	if (control_analysis && !ad_analysis){
-		if(!ad_analysis)iomodel->FindConstant(&controls,NULL,"md.inversion.control_parameters");
-		if(ad_analysis)iomodel->FindConstant(&controls,NULL,"md.autodiff.independent_object_names");
-
-		for(i=0;i<num_control_type;i++){
-			_assert_(controls[i]);
-			int control = StringToEnumx(controls[i]);
-			switch(control){
-				case BalancethicknessThickeningRateEnum:
-					if (iomodel->Data("md.balancethickness.thickening_rate")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.balancethickness.thickening_rate")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(BalancethicknessThickeningRateEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case VxEnum:
-					if (iomodel->Data("md.initialization.vx")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.initialization.vx")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(VxEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case VyEnum:
-					if (iomodel->Data("md.initialization.vy")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.initialization.vy")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i]/yts;
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i]/yts;
-						this->inputs->AddInput(new ControlInput(VyEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case ThicknessEnum:
-					if(iomodel->Data("md.geometry.thickness")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.geometry.thickness")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(ThicknessEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case BalancethicknessSpcthicknessEnum:
-					if(iomodel->Data("md.balancethickness.spcthickness")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.balancethickness.spcthickness")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(BalancethicknessSpcthicknessEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case BalancethicknessOmegaEnum:
-					if(iomodel->Data("md.balancethickness.omega")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.balancethickness.omega")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(BalancethicknessOmegaEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case FrictionCoefficientEnum:
-					if (iomodel->Data("md.friction.coefficient")){
-						for(j=0;j<NUMVERTICES;j++)nodeinputs[j]=iomodel->Data("md.friction.coefficient")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(FrictionCoefficientEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case MaterialsRheologyBbarEnum:
-					if(iomodel->Data("md.materials.rheology_B")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.materials.rheology_B")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(MaterialsRheologyBbarEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				case DamageDbarEnum:
-					if(iomodel->Data("md.damage.D")){
-						for(j=0;j<NUMVERTICES;j++) nodeinputs[j]=iomodel->Data("md.damage.D")[tria_vertex_ids[j]-1];
-						for(j=0;j<NUMVERTICES;j++)cmmininputs[j]=iomodel->Data("md.inversion.min_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						for(j=0;j<NUMVERTICES;j++)cmmaxinputs[j]=iomodel->Data("md.inversion.max_parameters")[(tria_vertex_ids[j]-1)*num_control_type+i];
-						this->inputs->AddInput(new ControlInput(DamageDbarEnum,TriaInputEnum,nodeinputs,cmmininputs,cmmaxinputs,P1Enum,i+1));
-					}
-					break;
-				default:
-					_error_("Control " << EnumToStringx(control) << " not implemented yet");
-			}
-		}
-		for(i=0;i<num_control_type;i++) xDelete<char>(controls[i]);
-		xDelete<char*>(controls);
-	}
-
-	/*DatasetInputs*/
-	if (control_analysis && iomodel->Data("md.inversion.cost_functions_coefficients")){
-
-		/*Generate cost functions associated with the iomodel*/
-		char**   cost_functions       = NULL;
-		int*     cost_functions_enums = NULL;
-		int      num_cost_functions;
-
-		iomodel->FindConstant(&num_cost_functions,"md.inversion.num_cost_functions");
-		iomodel->FindConstant(&cost_functions,&num_cost_functions,"md.inversion.cost_functions");
-		if(num_cost_functions<1) _error_("No cost functions found");
-		cost_functions_enums=xNew<int>(num_cost_functions);
-		for(j=0;j<num_cost_functions;j++){ cost_functions_enums[j]=StringToEnumx(cost_functions[j]); }
-
-		/*Create inputs and add to DataSetInput*/
-		DatasetInput* datasetinput=new DatasetInput(InversionCostFunctionsCoefficientsEnum);
-		for(i=0;i<num_responses;i++){
-			for(j=0;j<3;j++)nodeinputs[j]=iomodel->Data("md.inversion.cost_functions_coefficients")[(tria_vertex_ids[j]-1)*num_responses+i];
-			datasetinput->AddInput(new TriaInput(InversionCostFunctionsCoefficientsEnum,nodeinputs,P1Enum),cost_functions_enums[i]);
-		}
-
-		/*Add datasetinput to element inputs*/
-		this->inputs->AddInput(datasetinput);
-
-		/*Clean up cost functions*/
-		xDelete<int>(cost_functions_enums);
-		for(int j=0;j<num_cost_functions;j++) xDelete<char>(cost_functions[j]);
-		xDelete<char*>(cost_functions);
-	}
 }
 /*}}}*/
@@ -2894,5 +3210,5 @@
 
 	/*Add input to the element: */
-	this->inputs->AddInput(new TriaInput(enum_type,values,this->element_type));
+	this->AddInput2(enum_type,values,this->element_type);
 
 	/*Free ressources:*/
@@ -2908,6 +3224,9 @@
 	int         numnodes;
 	IssmDouble  value;
+	int         lidlist[NUMVERTICES];
 	int        *doflist = NULL;
 	IssmDouble *values  = NULL;
+
+	GetVerticesLidList(&lidlist[0]);
 
 	switch(type){
@@ -2920,5 +3239,5 @@
 			}
 			/*update input*/
-			this->inputs->AddInput(new TriaInput(name,values,P1Enum));
+			inputs2->SetTriaInput(name,P1Enum,NUMVERTICES,lidlist,values);
 			break;
 
@@ -2931,5 +3250,5 @@
 			}
 			/*update input*/
-			this->inputs->AddInput(new TriaInput(name,values,P1Enum));
+			inputs2->SetTriaInput(name,P1Enum,NUMVERTICES,lidlist,values);
 			break;
 
@@ -2942,5 +3261,5 @@
 			}
 			/*update input*/
-			this->inputs->AddInput(new TriaInput(name,values,P1Enum));
+			inputs2->SetTriaInput(name,P1Enum,NUMVERTICES,lidlist,values);
 			break;
 
@@ -2956,5 +3275,6 @@
 				if(xIsInf<IssmDouble>(values[i])) _error_("Inf found in vector");
 			}
-			this->inputs->AddInput(new TriaInput(name,values,this->element_type));
+			//this->inputs->AddInput(new TriaInput(name,values,this->element_type));
+			_error_("not implemented");
 			break;
 
@@ -2969,5 +3289,10 @@
 				if(xIsInf<IssmDouble>(values[i])) _error_("Inf found in vector");
 			}
-			this->inputs->AddInput(new TriaInput(name,values,this->element_type));
+			if(this->element_type==P1Enum){
+				inputs2->SetTriaInput(name,P1Enum,NUMVERTICES,lidlist,values);
+			}
+			else{
+				inputs2->SetTriaInput(name,this->element_type,this->lid,numnodes,values);
+			}
 			break;
 
@@ -2977,5 +3302,7 @@
 			if(xIsInf<IssmDouble>(value)) _error_("Inf found in vector");
 			/*update input*/
-			this->inputs->AddInput(new DoubleInput(name,value));
+			//this->inputs->AddInput(new DoubleInput(name,value));
+			//inputs2->SetTriaInput(name,P1Enum,NUMVERTICES,lidlist,values);
+			_error_("not implemented");
 			break;
 
@@ -2996,5 +3323,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&values[0],MeshVertexonboundaryEnum);
+	Element::GetInputListOnVertices(&values[0],MeshVertexonboundaryEnum);
 	sum = values[0]+values[1]+values[2];
 
@@ -3017,5 +3344,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&ls[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&ls[0],MaskIceLevelsetEnum);
 
 	/* If only one vertex has ice, there is an ice front here */
@@ -3043,30 +3370,4 @@
 }
 /*}}}*/
-bool       Tria::IsOnBase(){/*{{{*/
-
-	int domaintype;
-	this->parameters->FindParam(&domaintype,DomainTypeEnum);
-	switch(domaintype){
-		case Domain2DverticalEnum:
-			return HasEdgeOnBase();
-		case Domain2DhorizontalEnum:
-			return true;
-		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
-	}
-}
-/*}}}*/
-bool       Tria::IsOnSurface(){/*{{{*/
-
-	int domaintype;
-	this->parameters->FindParam(&domaintype,DomainTypeEnum);
-	switch(domaintype){
-		case Domain2DverticalEnum:
-			return HasEdgeOnSurface();
-		case Domain2DhorizontalEnum:
-			return true;
-		default: _error_("mesh "<<EnumToStringx(domaintype)<<" not supported yet");
-	}
-}
-/*}}}*/
 bool       Tria::IsZeroLevelset(int levelset_enum){/*{{{*/
 
@@ -3075,5 +3376,5 @@
 
 	/*Retrieve all inputs and parameters*/
-	GetInputListOnVertices(&ls[0],levelset_enum);
+	Element::GetInputListOnVertices(&ls[0],levelset_enum);
 
 	/*If the level set is awlays <0, there is no ice front here*/
@@ -3120,5 +3421,5 @@
 	/*intermediary: */
 	IssmDouble* values=NULL;
-	Input*      thickness_input=NULL;
+	Input2*     thickness_input=NULL;
 	IssmDouble  thickness;
 	IssmDouble  weight;
@@ -3138,5 +3439,5 @@
 
 	/*Retrieve inputs required:*/
-	thickness_input=this->GetInput(ThicknessEnum); _assert_(thickness_input);
+	thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
 
 	/*Retrieve material parameters: */
@@ -3200,14 +3501,14 @@
 	/*Get velocity and thickness*/
 	this->parameters->FindParam(&domaintype,DomainTypeEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* vx_input=NULL;
-	Input* vy_input=NULL;
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* vx_input=NULL;
+	Input2* vy_input=NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		vx_input=inputs->GetInput(VxEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyEnum); _assert_(vy_input);
+		vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
 	}
 	else{
-		vx_input=inputs->GetInput(VxAverageEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyAverageEnum); _assert_(vy_input);
+		vx_input=this->GetInput2(VxAverageEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyAverageEnum); _assert_(vy_input);
 	}
 
@@ -3248,7 +3549,7 @@
 	/*Retrieve all inputs we will be needing: */
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
-	Input* model_input=inputs->GetInput(modelenum);   _assert_(model_input);
-	Input* observation_input=inputs->GetInput(observationenum);_assert_(observation_input);
-	Input* weights_input     =inputs->GetInput(weightsenum);     _assert_(weights_input);
+	Input2* model_input=this->GetInput2(modelenum);   _assert_(model_input);
+	Input2* observation_input=this->GetInput2(observationenum);_assert_(observation_input);
+	Input2* weights_input     =this->GetInput2(weightsenum);     _assert_(weights_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -3289,5 +3590,5 @@
 	/*Retrieve all inputs we will be needing: */
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
-	Input* weights_input     =inputs->GetInput(weightsenum);     _assert_(weights_input);
+	Input2* weights_input     =this->GetInput2(weightsenum);     _assert_(weights_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -3425,9 +3726,8 @@
 	int         found = 0;
 	IssmDouble  value;
-	Input      *data  = NULL;
 	GaussTria  *gauss = NULL;
 
 	/*First, serarch the input: */
-	data=inputs->GetInput(natureofdataenum);
+	Input2* data=this->GetInput2(natureofdataenum);
 
 	/*figure out if we have the vertex id: */
@@ -3526,7 +3826,7 @@
 	rho_ice=FindParam(MaterialsRhoIceEnum);
 	density=rho_ice/rho_water;
-	GetInputListOnVertices(&h[0],ThicknessEnum);
-	GetInputListOnVertices(&r[0],BedEnum);
-	GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
+	Element::GetInputListOnVertices(&h[0],ThicknessEnum);
+	Element::GetInputListOnVertices(&r[0],BedEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskGroundediceLevelsetEnum);
 
 	/*go through vertices, and figure out which ones are grounded and want to unground: */
@@ -3594,10 +3894,10 @@
 
 	/*For FS only: we want the CS to be tangential to the bedrock*/
-	inputs->GetInputValue(&approximation,ApproximationEnum);
+	this->GetInput2Value(&approximation,ApproximationEnum);
 	if(!HasNodeOnBase() ||  approximation!=FSApproximationEnum) return;
 
 	/*Get inputs*/
-	Input* slope_input=inputs->GetInput(BedSlopeXEnum);                             _assert_(slope_input);
-	Input* groundedicelevelset_input=inputs->GetInput(MaskGroundediceLevelsetEnum); _assert_(groundedicelevelset_input);
+	Input2* slope_input=this->GetInput2(BedSlopeXEnum);                             _assert_(slope_input);
+	Input2* groundedicelevelset_input=this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(groundedicelevelset_input);
 	vertexonbase = xNew<IssmDouble>(numnodes);
 	this->GetInputListOnNodesVelocity(&vertexonbase[0],MeshVertexonbaseEnum);
@@ -3665,8 +3965,8 @@
 
 	/*Get inputs*/
-	Input* bed_input = this->GetInput(BedEnum);                     _assert_(bed_input);
-	Input* qsg_input = this->GetInput(FrontalForcingsSubglacialDischargeEnum);		 _assert_(qsg_input);
-	Input* TF_input  = this->GetInput(FrontalForcingsThermalForcingEnum);          _assert_(TF_input);
-	GetInputListOnVertices(&basinid[0],FrontalForcingsBasinIdEnum);
+	Input2* bed_input = this->GetInput2(BedEnum);                     _assert_(bed_input);
+	Input2* qsg_input = this->GetInput2(FrontalForcingsSubglacialDischargeEnum);		 _assert_(qsg_input);
+	Input2* TF_input  = this->GetInput2(FrontalForcingsThermalForcingEnum);          _assert_(TF_input);
+	Element::GetInputListOnVertices(&basinid[0],FrontalForcingsBasinIdEnum);
 
 	this->FindParam(&yts, ConstantsYtsEnum);
@@ -3700,5 +4000,5 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new TriaInput(CalvingMeltingrateEnum,&meltrates[0],P1Enum));
+	this->AddInput2(CalvingMeltingrateEnum,&meltrates[0],P1Enum);
 
 	/*Cleanup and return*/
@@ -3710,4 +4010,5 @@
 
 	IssmDouble  values[NUMVERTICES];
+	int         lidlist[NUMVERTICES];
 	int         idlist[NUMVERTICES],control_init;
 
@@ -3732,11 +4033,6 @@
 	if(!IsInputEnum(control_enum)) return;
 
-	Input* input     = (Input*)this->inputs->GetInput(control_enum);   _assert_(input);
-	if(input->ObjectEnum()!=ControlInputEnum){
-		_error_("input " << EnumToStringx(control_enum) << " is not a ControlInput");
-	}
-
-	ControlInput* controlinput = xDynamicCast<ControlInput*>(input);
-	input = controlinput->values;
+	this->GetVerticesLidList(&lidlist[0]);
+	ElementInput2* input=this->inputs2->GetControlInput2Data(control_enum,"value");   _assert_(input);
 
 	/*Get values on vertices*/
@@ -3746,12 +4042,12 @@
 			values[i]=vector[idlist[i]];
 		}
-		if(input->ObjectEnum()==TriaInputEnum){
-			Input* new_input = new TriaInput(control_enum,values,P1Enum);
-			controlinput->SetInput(new_input);
-		}
-		else if(input->ObjectEnum()==TransientInputEnum){
-			Input* new_input = new TriaInput(control_enum,values,P1Enum);
-			controlinput->SetInput(new_input,n);
-			controlinput->Configure(parameters);
+		if(input->ObjectEnum()==TriaInput2Enum){
+			input->SetInput(P1Enum,NUMVERTICES,&lidlist[0],&values[0]);
+		}
+		else if(input->ObjectEnum()==TransientInput2Enum){
+			_error_("not implemented");
+			//Input* new_input = new TriaInput(control_enum,values,P1Enum);
+			//controlinput->SetInput(new_input,n);
+			//controlinput->Configure(parameters);
 		}
 		else _error_("Type not supported");
@@ -3762,5 +4058,6 @@
 
 	IssmDouble  values[NUMVERTICES];
-	int         vertexpidlist[NUMVERTICES],control_init;
+	int         idlist[NUMVERTICES];
+	int         lidlist[NUMVERTICES];
 
 	/*Get Domain type*/
@@ -3769,5 +4066,4 @@
 
 	/*Specific case for depth averaged quantities*/
-	control_init=control_enum;
 	if(domaintype==Domain2DverticalEnum){
 		if(control_enum==MaterialsRheologyBbarEnum){
@@ -3784,18 +4080,16 @@
 	if(!IsInputEnum(control_enum)) return;
 
-	/*hrepare index list*/
-	GradientIndexing(&vertexpidlist[0],control_index);
+	/*prepare index list*/
+	this->GetVerticesLidList(&lidlist[0]);
+	GradientIndexing(&idlist[0],control_index);
 
 	/*Get values on vertices*/
 	for(int i=0;i<NUMVERTICES;i++){
-		values[i]=vector[vertexpidlist[i]];
-	}
-	Input* new_input = new TriaInput(control_enum,values,P1Enum);
-	Input* input     = (Input*)this->inputs->GetInput(control_enum);   _assert_(input);
-	if(input->ObjectEnum()!=ControlInputEnum){
-		_error_("input " << EnumToStringx(control_enum) << " is not a ControlInput");
-	}
-
-	((ControlInput*)input)->SetInput(new_input);
+		values[i]=vector[idlist[i]];
+	}
+
+	/*Set Input*/
+	ElementInput2* input=this->inputs2->GetControlInput2Data(control_enum,"value");   _assert_(input);
+	input->SetInput(P1Enum,NUMVERTICES,&lidlist[0],&values[0]);
 }
 /*}}}*/
@@ -3813,4 +4107,24 @@
 		this->nodes=(Node**)this->hnodes[analysis_counter]->deliverp();
 	}
+
+}
+/*}}}*/
+void       Tria::SetElementInput(int enum_in,IssmDouble value){/*{{{*/
+
+	this->SetElementInput(this->inputs2,enum_in,value);
+
+}
+/*}}}*/
+void       Tria::SetElementInput(Inputs2* inputs2,int enum_in,IssmDouble value){/*{{{*/
+
+	_assert_(inputs2);
+	inputs2->SetTriaInput(enum_in,P0Enum,this->lid,value);
+
+}
+/*}}}*/
+void       Tria::SetElementInput(Inputs2* inputs2,int numindices,int* indices,IssmDouble* values,int enum_in){/*{{{*/
+
+	_assert_(inputs2);
+	inputs2->SetTriaInput(enum_in,P1Enum,numindices,indices,values);
 
 }
@@ -3844,8 +4158,14 @@
 	Seg* seg=new Seg();
 	seg->id=this->id;
-	seg->inputs=(Inputs*)this->inputs->SpawnSegInputs(index1,index2);
+	seg->sid=this->sid;
+	seg->lid=this->lid;
+	seg->inputs2=this->inputs2;
 	seg->parameters=this->parameters;
 	seg->element_type=P1Enum; //Only P1 CG for now (TO BE CHANGED)
 	this->SpawnSegHook(xDynamicCast<ElementHook*>(seg),index1,index2);
+
+	seg->iscollapsed = 1;
+	seg->collapsed_ids[0] = index1;
+	seg->collapsed_ids[1] = index2;
 
 	/*Spawn material*/
@@ -3893,6 +4213,6 @@
 
 	/*Retrieve all inputs we will need*/
-	Input* vx_input=inputs->GetInput(VxEnum);                                  _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);                                  _assert_(vy_input);
+	Input2* vx_input=this->GetInput2(VxEnum);                                  _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);                                  _assert_(vy_input);
 
 	/* Start looping on the number of vertices: */
@@ -3917,5 +4237,5 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new TriaInput(StrainRateparallelEnum,&strainparallel[0],P1Enum));
+	this->AddInput2(StrainRateparallelEnum,&strainparallel[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -3939,6 +4259,6 @@
 
 	/*Retrieve all inputs we will need*/
-	Input* vx_input=inputs->GetInput(VxEnum);                                  _assert_(vx_input);
-	Input* vy_input=inputs->GetInput(VyEnum);                                  _assert_(vy_input);
+	Input2* vx_input=this->GetInput2(VxEnum);                                  _assert_(vx_input);
+	Input2* vy_input=this->GetInput2(VyEnum);                                  _assert_(vy_input);
 
 	/* Start looping on the number of vertices: */
@@ -3963,5 +4283,5 @@
 
 	/*Add input*/
-	this->inputs->AddInput(new TriaInput(StrainRateperpendicularEnum,&strainperpendicular[0],P1Enum));
+	this->AddInput2(StrainRateperpendicularEnum,&strainperpendicular[0],P1DGEnum);
 
 	/*Clean up and return*/
@@ -4004,10 +4324,5 @@
 
 	/*intermediary: */
-	int    i;
-	IssmDouble C,dt;
-	IssmDouble dx,dy;
-	IssmDouble maxx,minx;
-	IssmDouble maxy,miny;
-	IssmDouble maxabsvx,maxabsvy;
+	IssmDouble C;
 	IssmDouble xyz_list[NUMVERTICES][3];
 
@@ -4016,26 +4331,28 @@
 
 	/*Get for Vx and Vy, the max of abs value: */
-	maxabsvx = this->inputs->MaxAbs(VxEnum);
-	maxabsvy = this->inputs->MaxAbs(VyEnum);
+	Input2* vx_input = this->GetInput2(VxEnum); _assert_(vx_input);
+	Input2* vy_input = this->GetInput2(VyEnum); _assert_(vy_input);
+	IssmDouble maxabsvx = vx_input->GetInputMaxAbs();
+	IssmDouble maxabsvy = vy_input->GetInputMaxAbs();
 
 	/* Get node coordinates and dof list: */
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
 
-	minx=xyz_list[0][0];
-	maxx=xyz_list[0][0];
-	miny=xyz_list[0][1];
-	maxy=xyz_list[0][1];
-
-	for(i=1;i<NUMVERTICES;i++){
-		if (xyz_list[i][0]<minx)minx=xyz_list[i][0];
-		if (xyz_list[i][0]>maxx)maxx=xyz_list[i][0];
-		if (xyz_list[i][1]<miny)miny=xyz_list[i][1];
-		if (xyz_list[i][1]>maxy)maxy=xyz_list[i][1];
-	}
-	dx=maxx-minx;
-	dy=maxy-miny;
+	IssmDouble minx=xyz_list[0][0];
+	IssmDouble maxx=xyz_list[0][0];
+	IssmDouble miny=xyz_list[0][1];
+	IssmDouble maxy=xyz_list[0][1];
+
+	for(int i=1;i<NUMVERTICES;i++){
+		if(xyz_list[i][0]<minx) minx=xyz_list[i][0];
+		if(xyz_list[i][0]>maxx) maxx=xyz_list[i][0];
+		if(xyz_list[i][1]<miny) miny=xyz_list[i][1];
+		if(xyz_list[i][1]>maxy) maxy=xyz_list[i][1];
+	}
+	IssmDouble dx=maxx-minx;
+	IssmDouble dy=maxy-miny;
 
 	/*CFL criterion: */
-	dt=C/(maxabsvx/dx+maxabsvy/dy);
+	IssmDouble dt = C/(maxabsvx/dx+maxabsvy/dy);
 
 	return dt;
@@ -4062,5 +4379,5 @@
 	/*Recover parameters and values*/
 	parameters->FindParam(&domaintype,DomainTypeEnum);
-	GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -4143,14 +4460,14 @@
 	IssmDouble calvingratex,calvingratey,thickness,Jdet;
 	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* calvingratex_input=NULL;
-	Input* calvingratey_input=NULL;
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* calvingratex_input=NULL;
+	Input2* calvingratey_input=NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		calvingratex_input=inputs->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-		calvingratey_input=inputs->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
+		calvingratex_input=this->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+		calvingratey_input=this->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
 	}
 	else{
-		calvingratex_input=inputs->GetInput(CalvingratexAverageEnum); _assert_(calvingratex_input);
-		calvingratey_input=inputs->GetInput(CalvingrateyAverageEnum); _assert_(calvingratey_input);
+		calvingratex_input=this->GetInput2(CalvingratexAverageEnum); _assert_(calvingratex_input);
+		calvingratey_input=this->GetInput2(CalvingrateyAverageEnum); _assert_(calvingratey_input);
 	}
 
@@ -4191,5 +4508,5 @@
 	/*Recover parameters and values*/
 	parameters->FindParam(&domaintype,DomainTypeEnum);
-	GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
+	Element::GetInputListOnVertices(&gl[0],MaskIceLevelsetEnum);
 
 	/*Be sure that values are not zero*/
@@ -4272,20 +4589,20 @@
 	IssmDouble calvingratex,calvingratey,vx,vy,vel,meltingrate,meltingratex,meltingratey,thickness,Jdet;
 	IssmDouble rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* thickness_input=inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
-	Input* calvingratex_input=NULL;
-	Input* calvingratey_input=NULL;
-	Input* vx_input=NULL;
-	Input* vy_input=NULL;
-	Input* meltingrate_input=NULL;
+	Input2* thickness_input=this->GetInput2(ThicknessEnum); _assert_(thickness_input);
+	Input2* calvingratex_input=NULL;
+	Input2* calvingratey_input=NULL;
+	Input2* vx_input=NULL;
+	Input2* vy_input=NULL;
+	Input2* meltingrate_input=NULL;
 	if(domaintype==Domain2DhorizontalEnum){
-		calvingratex_input=inputs->GetInput(CalvingratexEnum); _assert_(calvingratex_input);
-		calvingratey_input=inputs->GetInput(CalvingrateyEnum); _assert_(calvingratey_input);
-		vx_input=inputs->GetInput(VxEnum); _assert_(vx_input);
-		vy_input=inputs->GetInput(VyEnum); _assert_(vy_input);
-		meltingrate_input=inputs->GetInput(CalvingMeltingrateEnum); _assert_(meltingrate_input);
+		calvingratex_input=this->GetInput2(CalvingratexEnum); _assert_(calvingratex_input);
+		calvingratey_input=this->GetInput2(CalvingrateyEnum); _assert_(calvingratey_input);
+		vx_input=this->GetInput2(VxEnum); _assert_(vx_input);
+		vy_input=this->GetInput2(VyEnum); _assert_(vy_input);
+		meltingrate_input=this->GetInput2(CalvingMeltingrateEnum); _assert_(meltingrate_input);
 	}
 	else{
-		calvingratex_input=inputs->GetInput(CalvingratexAverageEnum); _assert_(calvingratex_input);
-		calvingratey_input=inputs->GetInput(CalvingrateyAverageEnum); _assert_(calvingratey_input);
+		calvingratex_input=this->GetInput2(CalvingratexAverageEnum); _assert_(calvingratex_input);
+		calvingratey_input=this->GetInput2(CalvingrateyAverageEnum); _assert_(calvingratey_input);
 	}
 
@@ -4327,9 +4644,9 @@
 	/*Get material parameters :*/
 	rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* floatingmelt_input = this->GetInput(BasalforcingsFloatingiceMeltingRateEnum); _assert_(floatingmelt_input);
-	Input* gllevelset_input = this->GetInput(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
-	Input* scalefactor_input = NULL;
+	Input2* floatingmelt_input = this->GetInput2(BasalforcingsFloatingiceMeltingRateEnum); _assert_(floatingmelt_input);
+	Input2* gllevelset_input   = this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
+	Input2* scalefactor_input  = NULL;
 	if(scaled==true){
-		scalefactor_input = this->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 	}
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
@@ -4372,9 +4689,9 @@
 	/*Get material parameters :*/
 	rho_ice=FindParam(MaterialsRhoIceEnum);
-	Input* groundedmelt_input = this->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(groundedmelt_input);
-	Input* gllevelset_input = this->GetInput(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
-	Input* scalefactor_input = NULL;
+	Input2* groundedmelt_input = this->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(groundedmelt_input);
+	Input2* gllevelset_input = this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gllevelset_input);
+	Input2* scalefactor_input = NULL;
 	if(scaled==true){
-		scalefactor_input = this->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 	}
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
@@ -4422,8 +4739,8 @@
 
 	/*Now get the average SMB over the element*/
-	Input* smb_input = inputs->GetInput(SmbMassBalanceEnum); _assert_(smb_input);
+	Input2* smb_input = this->GetInput2(SmbMassBalanceEnum); _assert_(smb_input);
 	smb_input->GetInputAverage(&smb);	// average smb on element in m ice s-1
 	if(scaled==true){
-		Input* scalefactor_input = inputs->GetInput(MeshScaleFactorEnum); _assert_(scalefactor_input);
+		Input2* scalefactor_input = this->GetInput2(MeshScaleFactorEnum); _assert_(scalefactor_input);
 		scalefactor_input->GetInputAverage(&scalefactor);// average scalefactor on element
 	}
@@ -4437,5 +4754,5 @@
 }
 /*}}}*/
-void       Tria::Update(int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finiteelement_type){/*{{{*/
+void       Tria::Update(Inputs2* inputs2,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finiteelement_type){/*{{{*/
 
 	/*Intermediaries*/
@@ -4581,7 +4898,4 @@
 	this->SetHookNodes(tria_node_ids,numnodes,analysis_counter); this->nodes=NULL;
 	xDelete<int>(tria_node_ids);
-
-	/*Fill with IoModel*/
-	this->InputUpdateFromIoModel(index,iomodel);
 }
 /*}}}*/
@@ -4594,6 +4908,6 @@
 
 	this->parameters->FindParam(&extrusioninput,InputToExtrudeEnum);
-	Input* input = inputs->GetInput(extrusioninput);      _assert_(input);
-	Input* onbase = inputs->GetInput(MeshVertexonbaseEnum); _assert_(onbase);
+	Input2* input = this->GetInput2(extrusioninput);      _assert_(input);
+	Input2* onbase = this->GetInput2(MeshVertexonbaseEnum); _assert_(onbase);
 
 	GaussTria* gauss=new GaussTria();
@@ -4618,6 +4932,6 @@
 
 	this->parameters->FindParam(&extrusioninput,InputToExtrudeEnum);
-	Input* input = inputs->GetInput(extrusioninput); _assert_(input);
-	Input* onsurf = inputs->GetInput(MeshVertexonsurfaceEnum); _assert_(onsurf);
+	Input2* input = this->GetInput2(extrusioninput); _assert_(input);
+	Input2* onsurf = this->GetInput2(MeshVertexonsurfaceEnum); _assert_(onsurf);
 
 	GaussTria* gauss=new GaussTria();
@@ -4675,5 +4989,5 @@
 	/*Get field on vertices (we do not allow for higher order elements!!)*/
 	IssmDouble lsf[NUMVERTICES];
-	this->GetInputListOnVertices(&lsf[0],fieldenum);
+	Element::GetInputListOnVertices(&lsf[0],fieldenum);
 
 	/*1. check that we do cross fieldvalue in this element*/
@@ -4740,35 +5054,8 @@
 void       Tria::GiaDeflection(Vector<IssmDouble>* wg,Vector<IssmDouble>* dwgdt,IssmDouble* x, IssmDouble* y){/*{{{*/
 
-	int i;
-	int gsize;
-	IssmDouble xi,yi,ri,re,area;
-	IssmDouble x0,y0;
 	IssmDouble xyz_list[NUMVERTICES][3];
 
-	/*thickness averages: */
-	IssmDouble* hes=NULL;
-	IssmDouble* times=NULL;
-	IssmDouble  currenttime;
-	int         numtimes;
-	Input* thickness_input=NULL;
-
 	/*gia solution parameters:*/
-	int cross_section_shape=0;
-
-	/*gia material parameters: */
-	IssmDouble lithosphere_shear_modulus;
-	IssmDouble lithosphere_density;
-	IssmDouble mantle_shear_modulus;
-	IssmDouble mantle_density;
-	Input* mantle_viscosity_input=NULL;
-	IssmDouble mantle_viscosity;
-	Input* lithosphere_thickness_input=NULL;
-	IssmDouble lithosphere_thickness;
-
-	/*ice properties: */
-	IssmDouble rho_ice;
-
-	/*constants: */
-	IssmDouble yts;
+	IssmDouble lithosphere_thickness,mantle_viscosity;
 
 	/*output: */
@@ -4780,45 +5067,50 @@
 
 	/*how many dofs are we working with here? */
+	int gsize;
+	IssmDouble yts;
 	this->parameters->FindParam(&gsize,MeshNumberofverticesEnum);
 	this->parameters->FindParam(&yts,ConstantsYtsEnum);
 
 	/*recover gia solution parameters: */
+	int cross_section_shape;
 	this->parameters->FindParam(&cross_section_shape,GiaCrossSectionShapeEnum);
 
 	/*what time is it? :*/
+	IssmDouble currenttime;
 	this->parameters->FindParam(&currenttime,TimeEnum);
 
 	/*recover material parameters: */
-	lithosphere_shear_modulus=FindParam(MaterialsLithosphereShearModulusEnum);
-	lithosphere_density=FindParam(MaterialsLithosphereDensityEnum);
-	mantle_shear_modulus=FindParam(MaterialsMantleShearModulusEnum);
-	mantle_density=FindParam(MaterialsMantleDensityEnum);
-	rho_ice=FindParam(MaterialsRhoIceEnum);
-
-	/*pull thickness averages: */
-	thickness_input=inputs->GetInput(ThicknessEnum);
-	if (!thickness_input)_error_("thickness input needed to compute gia deflection!");
-	thickness_input->GetInputAveragesUpToCurrentTime(&hes,&times,&numtimes,currenttime);
+	IssmDouble lithosphere_shear_modulus = FindParam(MaterialsLithosphereShearModulusEnum);
+	IssmDouble lithosphere_density       = FindParam(MaterialsLithosphereDensityEnum);
+	IssmDouble mantle_shear_modulus      = FindParam(MaterialsMantleShearModulusEnum);
+	IssmDouble mantle_density            = FindParam(MaterialsMantleDensityEnum);
+	IssmDouble rho_ice                   = FindParam(MaterialsRhoIceEnum);
+
+	/*pull thickness averages! */
+	IssmDouble *hes      = NULL;
+	IssmDouble *times    = NULL;
+	int         numtimes;
+	this->GetInputAveragesUpToCurrentTime(ThicknessEnum,&hes,&times,&numtimes,currenttime);
 
 	/*recover mantle viscosity: */
-	mantle_viscosity_input=inputs->GetInput(GiaMantleViscosityEnum);
+	Input2* mantle_viscosity_input=this->GetInput2(GiaMantleViscosityEnum);
 	if (!mantle_viscosity_input)_error_("mantle viscosity input needed to compute gia deflection!");
 	mantle_viscosity_input->GetInputAverage(&mantle_viscosity);
 
 	/*recover lithosphere thickness: */
-	lithosphere_thickness_input=inputs->GetInput(GiaLithosphereThicknessEnum);
+	Input2* lithosphere_thickness_input=this->GetInput2(GiaLithosphereThicknessEnum);
 	if (!lithosphere_thickness_input)_error_("lithosphere thickness input needed to compute gia deflection!");
 	lithosphere_thickness_input->GetInputAverage(&lithosphere_thickness);
 
 	/*pull area of this Tria: */
-	area=this->GetArea();
+	IssmDouble area=this->GetArea();
 
 	/*element radius: */
-	re=sqrt(area/PI);
+	IssmDouble re=sqrt(area/PI);
 
 	/*figure out gravity center of our element: */
 	::GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
-	x0=(xyz_list[0][0]+xyz_list[1][0]+xyz_list[2][0])/3.0;
-	y0=(xyz_list[0][1]+xyz_list[1][1]+xyz_list[2][1])/3.0;
+	IssmDouble x0=(xyz_list[0][0]+xyz_list[1][0]+xyz_list[2][0])/3.0;
+	IssmDouble y0=(xyz_list[0][1]+xyz_list[1][1]+xyz_list[2][1])/3.0;
 
 	/*start loading GiaDeflectionCore arguments: */
@@ -4839,8 +5131,9 @@
 	arguments.yts=yts;
 
-	for(i=0;i<gsize;i++){
+	for(int i=0;i<gsize;i++){
 		/*compute distance from the center of the tria to the vertex i: */
-		xi=x[i]; yi=y[i];
-		ri=sqrt(pow(xi-x0,2)+pow(yi-y0,2));
+		IssmDouble xi=x[i];
+		IssmDouble yi=y[i];
+		IssmDouble ri=sqrt(pow(xi-x0,2)+pow(yi-y0,2));
 
 		/*load ri onto arguments for this vertex i: */
@@ -4853,5 +5146,4 @@
 		wg->SetValue(i,wi,ADD_VAL);
 		dwgdt->SetValue(i,dwidt,ADD_VAL);
-
 	}
 
@@ -4891,5 +5183,5 @@
 
 	/*Compute ice thickness change: */
-	Input*	deltathickness_input=inputs->GetInput(EsaDeltathicknessEnum);
+	Input2* deltathickness_input=this->GetInput2(EsaDeltathicknessEnum);
 	if (!deltathickness_input)_error_("delta thickness input needed to compute elastic adjustment!");
 	deltathickness_input->GetInputAverage(&I);
@@ -5029,5 +5321,5 @@
 
 	/*Compute ice thickness change: */
-	Input*	deltathickness_input=inputs->GetInput(EsaDeltathicknessEnum);
+	Input2* deltathickness_input=this->GetInput2(EsaDeltathicknessEnum);
 	if (!deltathickness_input)_error_("delta thickness input needed to compute elastic adjustment!");
 	deltathickness_input->GetInputAverage(&I);
@@ -5185,5 +5477,5 @@
 void	Tria::SealevelriseMomentOfInertia(IssmDouble* dI_list,IssmDouble* Sg_old,IssmDouble eartharea){/*{{{*/
 	/*early return if we are not on an ice cap OR ocean:*/
-	if(!(this->inputs->Max(MaskIceLevelsetEnum)<0) && !IsWaterInElement()){
+	if(!IsIceOnlyInElement() && !IsWaterInElement()){
 		dI_list[0] = 0.0; // this is important!!!
 		dI_list[1] = 0.0; // this is important!!!
@@ -5256,5 +5548,5 @@
 		dI_list[2] = +4*PI*(rho_water*S*area)*pow(re,4)*(1-pow(sin(late),2))/eartharea;
 	}
-	else if(this->inputs->Max(MaskIceLevelsetEnum)<0){
+	else if(IsIceOnlyInElement()){
 		IssmDouble rho_ice, I;
 
@@ -5263,5 +5555,5 @@
 
 		/*Compute ice thickness change: */
-		Input*	deltathickness_input=inputs->GetInput(SealevelriseDeltathicknessEnum);
+		Input2* deltathickness_input=this->GetInput2(SealevelriseDeltathicknessEnum);
 		if (!deltathickness_input)_error_("delta thickness input needed to compute sea level rise!");
 		deltathickness_input->GetInputAverage(&I);
@@ -5275,4 +5567,24 @@
 }/*}}}*/
 void    Tria::SealevelriseEustatic(Vector<IssmDouble>* pSgi,IssmDouble* peustatic,IssmDouble* latitude,IssmDouble* longitude,IssmDouble* radius,IssmDouble oceanarea,IssmDouble eartharea){ /*{{{*/
+
+	/*Computational flags:*/
+	int bp_compute_fingerprints= 0;
+
+	/*some parameters first: */
+	this->parameters->FindParam(&bp_compute_fingerprints,DslComputeFingerprintsEnum);
+
+	if(!IsOceanInElement()){
+		/*NOTE(review): guard above is '!IsOceanInElement()' yet this branch claims ocean is present — verify
+		 *the condition; compute eustatic ocean loads when bottom pressure fingerprints are requested:*/
+		if(bp_compute_fingerprints)this->SealevelriseEustaticBottomPressure(pSgi,peustatic,latitude,longitude,radius,oceanarea,eartharea);
+	}
+	//if(!IsIceInElement()){
+		/*there is ice in this element, let's compute the eustatic response for ice changes:*/
+		this->SealevelriseEustaticIce(pSgi,peustatic,latitude,longitude,radius,oceanarea,eartharea);
+	//}
+
+}
+/*}}}*/
+void    Tria::SealevelriseEustaticIce(Vector<IssmDouble>* pSgi,IssmDouble* peustatic,IssmDouble* latitude,IssmDouble* longitude,IssmDouble* radius,IssmDouble oceanarea,IssmDouble eartharea){ /*{{{*/
 
 	/*diverse:*/
@@ -5306,24 +5618,25 @@
 
 	/*early return if we are not on an ice cap:*/
-	if(!(this->inputs->Max(MaskIceLevelsetEnum)<=0)){
-		constant=0; this->inputs->AddInput(new TriaInput(SealevelEustaticMaskEnum,&constant,P0Enum));
+	if(!IsIceOnlyInElement()){
+		constant=0; this->AddInput2(SealevelEustaticMaskEnum,&constant,P0Enum);
 		*peustatic=0; //do not forget to assign this pointer, otherwise, global eustatic will be garbage!
 		return;
 	}
 
-	/*early return if we are fully floating: */
-	if (this->inputs->Max(MaskGroundediceLevelsetEnum)<=0){
-		constant=0; this->inputs->AddInput(new TriaInput(SealevelEustaticMaskEnum,&constant,P0Enum));
+	/*early return if we are fully floating:*/
+	Input2* gr_input=this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gr_input);
+	if (gr_input->GetInputMax()<=0){
+		constant=0; this->AddInput2(SealevelEustaticMaskEnum,&constant,P0Enum);
 		*peustatic=0; //do not forget to assign this pointer, otherwise, global eustatic will be garbage!
 		return;
 	}
 
-	/*If we are here, we are on ice that is fully grounded or half-way to floating: */
-	if ((this->inputs->Min(MaskGroundediceLevelsetEnum))<0){
+	/*If we are here, we are on ice that is fully grounded or half-way to floating:*/
+	if ((gr_input->GetInputMin())<0){
 		notfullygrounded=true; //used later on.
 	}
 
 	/*Inform mask: */
-	constant=1; this->inputs->AddInput(new TriaInput(SealevelEustaticMaskEnum,&constant,P0Enum));
+	constant=1; this->AddInput2(SealevelEustaticMaskEnum,&constant,P0Enum);
 
 	/*recover material parameters: */
@@ -5397,6 +5710,6 @@
 	}
 
-	/*Compute ice thickness change: */
-	Input*	deltathickness_input=inputs->GetInput(SealevelriseDeltathicknessEnum);
+	/*Compute ice thickness: */
+	Input2* deltathickness_input=this->GetInput2(SealevelriseDeltathicknessEnum);
 	if (!deltathickness_input)_error_("delta thickness input needed to compute sea level rise!");
 
@@ -5460,4 +5773,158 @@
 			/*Add all components to the pSgi or pSgo solution vectors:*/
 			values[i]=3*rho_ice/rho_earth*area/eartharea*I*(G_rigid+G_elastic);
+		}
+		pSgi->SetValues(gsize,indices,values,ADD_VAL);
+
+		/*free ressources:*/
+		xDelete<IssmDouble>(values);
+		xDelete<int>(indices);
+	}
+
+	/*Assign output pointer:*/
+	_assert_(!xIsNan<IssmDouble>(eustatic));
+	_assert_(!xIsInf<IssmDouble>(eustatic));
+	*peustatic=eustatic;
+	return;
+}
+/*}}}*/
+void    Tria::SealevelriseEustaticBottomPressure(Vector<IssmDouble>* pSgi,IssmDouble* peustatic,IssmDouble* latitude,IssmDouble* longitude,IssmDouble* radius,IssmDouble oceanarea,IssmDouble eartharea){ /*{{{*/
+
+	/*diverse:*/
+	int gsize;
+	bool spherical=true;
+	IssmDouble llr_list[NUMVERTICES][3];
+	IssmDouble area;
+	IssmDouble I;  //change in ice thickness or water level(Farrel and Clarke, Equ. 4)
+	IssmDouble rho;
+	IssmDouble late,longe,re;
+	IssmDouble lati,longi,ri;
+
+	/*elastic green function:*/
+	IssmDouble* G_elastic_precomputed=NULL;
+	int         M;
+
+	/*ice properties: */
+	IssmDouble rho_water,rho_earth;
+
+	/*constants:*/
+	IssmDouble constant=0;
+
+	/*Initialize eustatic component: do not skip this step :):*/
+	IssmDouble eustatic = 0.;
+
+	/*Computational flags:*/
+	bool computerigid = true;
+	bool computeelastic= true;
+	bool scaleoceanarea= false;
+	bool bp_compute_fingerprints= false;
+
+	/*we are here to compute fingerprints originating from bottom pressure loads:*/
+
+	/*Inform mask: */
+	constant=1; this->AddInput2(SealevelEustaticMaskEnum,&constant,P0Enum);
+
+	/*recover material parameters: */
+	rho_water=FindParam(MaterialsRhoFreshwaterEnum);
+	rho_earth=FindParam(MaterialsEarthDensityEnum);
+
+	/*recover love numbers and computational flags: */
+	this->parameters->FindParam(&computerigid,SealevelriseRigidEnum);
+	this->parameters->FindParam(&computeelastic,SealevelriseElasticEnum);
+	this->parameters->FindParam(&scaleoceanarea,SealevelriseOceanAreaScalingEnum);
+
+	/*recover elastic green function:*/
+	if(computeelastic){
+		DoubleVecParam* parameter = static_cast<DoubleVecParam*>(this->parameters->FindParamObject(SealevelriseGElasticEnum));
+		_assert_(parameter);
+		parameter->GetParameterValueByPointer(&G_elastic_precomputed,&M);
+	}
+
+	/*how many dofs are we working with here? */
+	this->parameters->FindParam(&gsize,MeshNumberofverticesEnum);
+
+	/* Where is the centroid of this element?:{{{*/
+
+	/*retrieve coordinates: */
+	::GetVerticesCoordinates(&llr_list[0][0],this->vertices,NUMVERTICES,spherical);
+
+	IssmDouble minlong=400;
+	IssmDouble maxlong=-20;
+	for (int i=0;i<NUMVERTICES;i++){
+		llr_list[i][0]=(90-llr_list[i][0]);
+		if(llr_list[i][1]<0)llr_list[i][1]=180+(180+llr_list[i][1]);
+		if(llr_list[i][1]>maxlong)maxlong=llr_list[i][1];
+		if(llr_list[i][1]<minlong)minlong=llr_list[i][1];
+	}
+	if(minlong==0 && maxlong>180){
+		if (llr_list[0][1]==0)llr_list[0][1]=360;
+		if (llr_list[1][1]==0)llr_list[1][1]=360;
+		if (llr_list[2][1]==0)llr_list[2][1]=360;
+	}
+
+	// correction at the north pole
+	if(llr_list[0][0]==0)llr_list[0][1]=(llr_list[1][1]+llr_list[2][1])/2.0;
+	if(llr_list[1][0]==0)llr_list[1][1]=(llr_list[0][1]+llr_list[2][1])/2.0;
+	if(llr_list[2][0]==0)llr_list[2][1]=(llr_list[0][1]+llr_list[1][1])/2.0;
+
+	//correction at the south pole
+	if(llr_list[0][0]==180)llr_list[0][1]=(llr_list[1][1]+llr_list[2][1])/2.0;
+	if(llr_list[1][0]==180)llr_list[1][1]=(llr_list[0][1]+llr_list[2][1])/2.0;
+	if(llr_list[2][0]==180)llr_list[2][1]=(llr_list[0][1]+llr_list[1][1])/2.0;
+
+	late=(llr_list[0][0]+llr_list[1][0]+llr_list[2][0])/3.0;
+	longe=(llr_list[0][1]+llr_list[1][1]+llr_list[2][1])/3.0;
+
+	late=90-late;
+	if(longe>180)longe=(longe-180)-180;
+
+	late=late/180*PI;
+	longe=longe/180*PI;
+	/*}}}*/
+
+	/*Compute area of element. For now, we dont do partially grounded elements:*/
+	area=GetAreaSpherical();
+
+	/*Compute bottom pressure change: */
+	Input2* bottompressure_change_input=this->GetInput2(DslSeaWaterPressureChangeAtSeaFloor);
+	if (!bottompressure_change_input)_error_("bottom pressure input needed to compute sea level rise fingerprint!");
+
+	/*If we are fully grounded, take the average over the element: */
+	bottompressure_change_input->GetInputAverage(&I);
+
+	/*Compute eustatic component:*/
+	_assert_(oceanarea>0.);
+	if(scaleoceanarea) oceanarea=3.619e+14; // use true ocean area, m^2
+
+	/*We do not need to add the bottom pressure component to the eustatic value: */
+	eustatic += 0;
+
+	if(computeelastic | computerigid){
+		int* indices=xNew<int>(gsize);
+		IssmDouble* values=xNew<IssmDouble>(gsize);
+		IssmDouble alpha;
+		IssmDouble delPhi,delLambda;
+		for(int i=0;i<gsize;i++){
+			indices[i]=i;
+
+			IssmDouble G_rigid=0;  //do not remove =0!
+			IssmDouble G_elastic=0;  //do not remove =0!
+
+			/*Compute alpha angle between centroid and current vertex : */
+			lati=latitude[i]/180*PI; longi=longitude[i]/180*PI;
+
+		   delPhi=fabs(lati-late); delLambda=fabs(longi-longe);
+			alpha=2.*asin(sqrt(pow(sin(delPhi/2),2.0)+cos(lati)*cos(late)*pow(sin(delLambda/2),2)));
+
+			//Rigid earth gravitational perturbation:
+			if(computerigid)G_rigid=1.0/2.0/sin(alpha/2.0);
+
+			//Elastic component  (from Eq 17 in Adhikari et al, GMD 2015)
+			if(computeelastic){
+				int index=reCast<int,IssmDouble>(alpha/PI*reCast<IssmDouble,int>(M-1));
+				G_elastic += G_elastic_precomputed[index];
+			}
+
+			/*Add all components to the pSgi or pSgo solution vectors:*/
+			values[i]=3*rho_water/rho_earth*area/eartharea*I*(G_rigid+G_elastic);
 		}
 		pSgi->SetValues(gsize,indices,values,ADD_VAL);
@@ -5509,8 +5976,8 @@
 	/*early return if we are not on the ocean:*/
 	if (!IsWaterInElement()){
-		constant=0; this->inputs->AddInput(new TriaInput(SealevelEustaticOceanMaskEnum,&constant,P0Enum));
+		constant=0; this->AddInput2(SealevelEustaticOceanMaskEnum,&constant,P0Enum);
 		return;
 	}
-	constant=1; this->inputs->AddInput(new TriaInput(SealevelEustaticOceanMaskEnum,&constant,P0Enum));
+	constant=1; this->AddInput2(SealevelEustaticOceanMaskEnum,&constant,P0Enum);
 
 	/*recover computational flags: */
@@ -5661,8 +6128,9 @@
 
 	/*early return if we are not on the ocean or on an ice cap:*/
-	if(!(this->inputs->Max(MaskIceLevelsetEnum)<0) && !IsWaterInElement()) return;
+	if(!IsIceOnlyInElement() && !IsWaterInElement()) return;
 
 	/*early return if we are fully floating: */
-	if (this->inputs->Max(MaskGroundediceLevelsetEnum)<=0)return;
+	Input2* gr_input=this->GetInput2(MaskGroundediceLevelsetEnum); _assert_(gr_input);
+	if(gr_input->GetInputMax()<=0)return;
 
 	/*recover computational flags: */
@@ -5740,5 +6208,5 @@
 
 	/*Compute ice thickness change: */
-	Input*	deltathickness_input=inputs->GetInput(SealevelriseDeltathicknessEnum);
+	Input2* deltathickness_input=this->GetInput2(SealevelriseDeltathicknessEnum);
 	if (!deltathickness_input)_error_("delta thickness input needed to compute sea level rise!");
 	deltathickness_input->GetInputAverage(&I);
@@ -5795,5 +6263,5 @@
 
 		/*Add all components to the pUp solution vectors:*/
-		if(this->inputs->Max(MaskIceLevelsetEnum)<0){
+		if(IsIceOnlyInElement()){
 			U_values[i]+=3*rho_ice/rho_earth*area/eartharea*I*U_elastic[i];
 			if(horiz){
@@ -5833,48 +6301,38 @@
 void       Tria::InputUpdateFromMatrixDakota(IssmDouble* matrix, int nrows, int ncols, int name, int type){/*{{{*/
 
-	int             i,t,row;
-	IssmDouble      time;
-	TransientInput *transientinput = NULL;
-	IssmDouble      values[3];
-	IssmDouble      value;
-
 	/*Check that name is an element input*/
 	if(!IsInputEnum(name)) _error_("Enum "<<EnumToStringx(name)<<" is not in IsInput");
+	TransientInput2* transientinput = inputs2->GetTransientInput(name);
 
 	switch(type){
 
 		case VertexEnum:
+
+			/*Get LID lists once for all*/
+			IssmDouble  values[NUMVERTICES];
+			int         lidlist[NUMVERTICES];
+			this->GetVerticesLidList(&lidlist[0]);
+
 			/*Create transient input: */
-			for(t=0;t<ncols;t++){ //ncols is the number of times
-
-				/*create input values: */
-				for(i=0;i<3;i++){
-					row=this->vertices[i]->Sid();
+			for(int t=0;t<ncols;t++){ //ncols is the number of times
+				for(int i=0;i<3;i++){
+					int row=this->vertices[i]->Sid();
 					values[i]=matrix[ncols*row+t];
 				}
 
 				/*time:*/
-				time=matrix[(nrows-1)*ncols+t];
-
-				if(t==0) transientinput=new TransientInput(name);
-				transientinput->AddTimeInput(new TriaInput(name,values,P1Enum),time);
-				transientinput->Configure(parameters);
+				IssmDouble time=matrix[(nrows-1)*ncols+t];
+
+				transientinput->AddTriaTimeInput(t,NUMVERTICES,&lidlist[0],&values[0],P1Enum);
 			}
-			this->inputs->AddInput(transientinput);
 			break;
 
 		case ElementEnum:
 			/*Get value for the element: */
-			for(t=0;t<ncols;t++){ //ncols is the number of times
-				value=matrix[ncols*(this->Sid())+t];
-
-				/*time:*/
-				time=matrix[(nrows-1)*ncols+t];
-
-				if(t==0) transientinput=new TransientInput(name);
-				transientinput->AddTimeInput(new TriaInput(name,&value,P0Enum),time);
-				transientinput->Configure(parameters);
+			for(int t=0;t<ncols;t++){ //ncols is the number of times
+				IssmDouble value=matrix[ncols*(this->Sid())+t];
+				IssmDouble time=matrix[(nrows-1)*ncols+t];
+				transientinput->AddTriaTimeInput(t,1,&(this->lid),&value,P0Enum);
 			}
-			this->inputs->AddInput(transientinput);
 			break;
 
@@ -5882,5 +6340,4 @@
 			_error_("type " << type << " (" << EnumToStringx(type) << ") not implemented yet");
 	}
-
 }
 /*}}}*/
@@ -5914,8 +6371,8 @@
 
 					/*retrieve inputs: */
-					GetInputListOnVertices(&thickness_init[0],ThicknessEnum);
-					GetInputListOnVertices(&hydrostatic_ratio[0],GeometryHydrostaticRatioEnum);
-					GetInputListOnVertices(&bed[0],BaseEnum);
-					GetInputListOnVertices(&surface[0],SurfaceEnum);
+					Element::GetInputListOnVertices(&thickness_init[0],ThicknessEnum);
+					Element::GetInputListOnVertices(&hydrostatic_ratio[0],GeometryHydrostaticRatioEnum);
+					Element::GetInputListOnVertices(&bed[0],BaseEnum);
+					Element::GetInputListOnVertices(&surface[0],SurfaceEnum);
 
 					/*build new bed and surface: */
@@ -5957,14 +6414,14 @@
 
 					/*Add new inputs: */
-					this->inputs->AddInput(new TriaInput(ThicknessEnum,thickness,P1Enum));
-					this->inputs->AddInput(new TriaInput(BaseEnum,bed,P1Enum));
-					this->inputs->AddInput(new TriaInput(SurfaceEnum,surface,P1Enum));
+					this->AddInput2(ThicknessEnum,thickness,P1Enum);
+					this->AddInput2(BaseEnum,bed,P1Enum);
+					this->AddInput2(SurfaceEnum,surface,P1Enum);
 
 					break;
 				case MaterialsRheologyBEnum:
-					this->inputs->AddInput(new TriaInput(MaterialsRheologyBbarEnum,values,P1Enum));
+					this->AddInput2(MaterialsRheologyBbarEnum,values,P1Enum);
 					break;
 				default:
-					this->inputs->AddInput(new TriaInput(name,values,P1Enum));
+					this->AddInput2(name,values,P1Enum);
 			}
 			break;
@@ -5974,5 +6431,5 @@
 			/*Get value for the element: */
 			value=vector[this->Sid()]; //careful, vector of values here is not parallel distributed, but serial distributed (from a serial Dakota core!)
-			this->inputs->AddInput(new TriaInput(name,&value,P0Enum));
+			this->AddInput2(name,&value,P0Enum);
 			break;
 		default:
Index: /issm/trunk/src/c/classes/Elements/Tria.h
===================================================================
--- /issm/trunk/src/c/classes/Elements/Tria.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Elements/Tria.h	(revision 24686)
@@ -1,3 +1,3 @@
-/*! \file Tria.h 
+/*! \file Tria.h
  *  \brief: header file for tria object
  */
@@ -30,8 +30,9 @@
 
 	public:
+		int iscollapsed;
 
 		/*Tria constructors, destructors {{{*/
 		Tria(){};
-		Tria(int tria_id,int tria_sid,IoModel* iomodel,int nummodels);
+		Tria(int tria_id,int tria_sid,int tria_lid,IoModel* iomodel,int nummodels);
 		~Tria();
 		/*}}}*/
@@ -63,5 +64,5 @@
 		void        ComputeStressTensor();
 		void        ComputeSurfaceNormalVelocity();
-		void        Configure(Elements* elements,Loads* loads,Nodes* nodesin,Vertices* verticesin,Materials* materials,Parameters* parameters);
+		void        Configure(Elements* elements,Loads* loads,Nodes* nodesin,Vertices* verticesin,Materials* materials,Parameters* parameters,Inputs2* inputs2in);
 		void        ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index,int offset,int N, int M);
 		void        ControlInputSetGradient(IssmDouble* gradient,int enum_type,int control_index);
@@ -83,4 +84,6 @@
 		IssmDouble  GetIcefrontArea();
 		void	      GetIcefrontCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum);
+		void        GetInputListOnVertices(IssmDouble* pvalue,Input2* input,IssmDouble default_value);
+		void        GetInputListOnNodes(IssmDouble* pvalue,Input2* input,IssmDouble default_value);
 		void	      GetLevelCoordinates(IssmDouble** pxyz_front,IssmDouble* xyz_list,int levelsetenum,IssmDouble level);
 		int         GetVertexIndex(Vertex* vertex);
@@ -102,9 +105,8 @@
 		void        InputDepthAverageAtBase(int enum_type,int average_enum_type);
 		void        InputExtrude(int enum_type,int start){_error_("not implemented"); /*For penta only*/};
+		void        ControlInputExtrude(int enum_type,int start){/*For penta only*/};
 		bool	   	IsFaceOnBoundary(void);
 		bool	   	IsIcefront(void);
 		bool        IsNodeOnShelfFromFlags(IssmDouble* flags);
-		bool        IsOnBase();
-		bool        IsOnSurface();
 		bool        IsZeroLevelset(int levelset_enum);
 		IssmDouble  Masscon(IssmDouble* levelset);
@@ -124,4 +126,7 @@
 		void        ResetLevelsetFromSegmentlist(IssmDouble* segments,int numsegments);
 		void        RignotMeltParameterization();
+		void        SetElementInput(int enum_in,IssmDouble values);
+		void        SetElementInput(Inputs2* inputs2,int enum_in,IssmDouble values);
+		void        SetElementInput(Inputs2* inputs2,int numindices,int* indices,IssmDouble* values,int enum_in);
 		void        SetControlInputsFromVector(IssmDouble* vector,int control_enum,int control_index,int offset,int N,int M);
 		void        SetControlInputsFromVector(IssmDouble* vector,int control_enum,int control_index);
@@ -140,5 +145,5 @@
 		IssmDouble  TotalGroundedBmb(bool scaled);
 		IssmDouble  TotalSmb(bool scaled);
-		void        Update(int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
+		void        Update(Inputs2* inputs2,int index, IoModel* iomodel,int analysis_counter,int analysis_type,int finitelement);
 		int         UpdatePotentialUngrounding(IssmDouble* vertices_potentially_ungrounding,Vector<IssmDouble>* vec_nodes_on_iceshelf,IssmDouble* nodes_on_iceshelf);
 		void        ValueP1DerivativesOnGauss(IssmDouble* dvalue,IssmDouble* values,IssmDouble* xyz_list,Gauss* gauss);
@@ -160,6 +165,8 @@
 		IssmDouble OceanArea(void);
 		IssmDouble OceanAverage(IssmDouble* Sg);
-		void    SealevelriseMomentOfInertia(IssmDouble* dI_list,IssmDouble* Sg_old,IssmDouble eartharea); 
+		void    SealevelriseMomentOfInertia(IssmDouble* dI_list,IssmDouble* Sg_old,IssmDouble eartharea);
 		void    SealevelriseEustatic(Vector<IssmDouble>* pSgi,IssmDouble* peustatic,IssmDouble* latitude,IssmDouble* longitude,IssmDouble* radius,IssmDouble oceanarea,IssmDouble eartharea);
+		void    SealevelriseEustaticIce(Vector<IssmDouble>* pSgi,IssmDouble* peustatic,IssmDouble* latitude,IssmDouble* longitude,IssmDouble* radius,IssmDouble oceanarea,IssmDouble eartharea);
+		void    SealevelriseEustaticBottomPressure(Vector<IssmDouble>* pSgi,IssmDouble* peustatic,IssmDouble* latitude,IssmDouble* longitude,IssmDouble* radius,IssmDouble oceanarea,IssmDouble eartharea);
 		void    SealevelriseNonEustatic(Vector<IssmDouble>* pSgo,IssmDouble* Sg_old,IssmDouble* latitude,IssmDouble* longitude,IssmDouble* radius,IssmDouble eartharea);
 		void    SealevelriseGeodetic(Vector<IssmDouble>* pUp,Vector<IssmDouble>* pNorth,Vector<IssmDouble>* pEast,IssmDouble* Sg,IssmDouble* latitude,IssmDouble* longitude,IssmDouble* radius,IssmDouble* xx,IssmDouble* yy,IssmDouble* zz,IssmDouble eartharea,int horiz);
@@ -167,8 +174,10 @@
 		/*}}}*/
 		/*Tria specific routines:{{{*/
-		void           AddBasalInput(int input_enum, IssmDouble* values, int interpolation_enum);
-		void           AddInput(int input_enum, IssmDouble* values, int interpolation_enum);
-		void           AddControlInput(int input_enum, IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max, int interpolation_enum,int id);
-		void           DatasetInputCreate(IssmDouble* array,int M,int N,int* individual_enums,int num_inputs,IoModel* iomodel,int input_enum);
+		void           AddBasalInput2(int input_enum, IssmDouble* values, int interpolation_enum);
+		void           AddInput2(int input_enum, IssmDouble* values, int interpolation_enum);
+		void           AddControlInput(int input_enum,Inputs2* inputs2,IoModel* iomodel,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max, int interpolation_enum,int id);
+		void           DatasetInputCreate(IssmDouble* array,int M,int N,int* individual_enums,int num_inputs,Inputs2* inputs2,IoModel* iomodel,int input_enum);
+		void           CreateInputTimeAverage(int transientinput_enum,int averagedinput_enum,IssmDouble init_time,IssmDouble end_time);
+		void           GetInputAveragesUpToCurrentTime(int input_enum,IssmDouble** pvalues, IssmDouble** ptimes, int* pnumtimes, IssmDouble currenttime);
 		IssmDouble     GetArea(void);
 		IssmDouble     GetHorizontalSurfaceArea(void);
@@ -178,4 +187,8 @@
 		void           GetAreaCoordinates(IssmDouble *area_coordinates,IssmDouble* xyz_zero,IssmDouble* xyz_list,int numpoints);
 		int            GetElementType(void);
+		Input2*        GetInput2(int enumtype);
+		Input2*        GetInput2(int enumtype,IssmDouble time);
+		Input2*        GetInput2(int inputenum,IssmDouble start_time, IssmDouble end_time);
+		DatasetInput2* GetDatasetInput2(int inputenum);
 		void           GetInputValue(IssmDouble* pvalue,Node* node,int enumtype);
 		void           GetInputValue(IssmDouble* pvalue,Vertex* vertex,int enumtype);
@@ -213,4 +226,5 @@
 		void           NormalTop(IssmDouble* normal,IssmDouble* xyz_list);
 		void           SetTemporaryElementType(int element_type_in){_error_("not implemented yet");};
+		void           InputServe(Input2* input_in);
 		Seg*	         SpawnSeg(int index1,int index2);
 		IssmDouble     StabilizationParameter(IssmDouble u, IssmDouble v, IssmDouble w, IssmDouble diameter, IssmDouble kappa){_error_("not implemented yet");};
@@ -218,5 +232,4 @@
 		void           UpdateConstraintsExtrudeFromBase(void);
 		void           UpdateConstraintsExtrudeFromTop(void);
-		void           ViscousHeating(IssmDouble* pphi,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){_error_("not implemented yet");};
 		/*}}}*/
 
Index: /issm/trunk/src/c/classes/FemModel.cpp
===================================================================
--- /issm/trunk/src/c/classes/FemModel.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/FemModel.cpp	(revision 24686)
@@ -17,8 +17,15 @@
 #include "../shared/Enum/Enum.h"
 #include "../analyses/analyses.h"
+#include "./Inputs2/DatasetInput2.h"
+#include "./Inputs2/ElementInput2.h"
+#include "./Inputs2/TransientInput2.h"
 
 #if _HAVE_CODIPACK_
 #include <sstream> // for output of the CoDiPack tape
 extern CoDi_global codi_global;
+#endif
+
+#if defined(_HAVE_NEOPZ_) && !defined(_HAVE_AD_)
+#include <TPZRefPatternDataBase.h>
 #endif
 
@@ -161,5 +168,4 @@
 	/*Save communicator in the parameters dataset: */
 	this->parameters->AddObject(new GenericParam<ISSM_MPI_Comm>(incomm,FemModelCommEnum));
-
 }
 /*}}}*/
@@ -196,4 +202,5 @@
 	if(materials)delete materials;
 	if(parameters)delete parameters;
+	if(inputs2)delete inputs2;
 	if(results)delete results;
 
@@ -336,4 +343,5 @@
 	output->materials=static_cast<Materials*>(this->materials->Copy());
 	output->parameters=static_cast<Parameters*>(this->parameters->Copy());
+	output->inputs2=static_cast<Inputs2*>(this->inputs2->Copy());
 	output->results=static_cast<Results*>(this->results->Copy());
 	output->vertices=static_cast<Vertices*>(this->vertices->Copy());
@@ -354,5 +362,5 @@
 		SpcNodesx(output->nodes_list[i],output->constraints_list[i],output->parameters);
 		NodesDofx(output->nodes_list[i],output->parameters);
-		ConfigureObjectsx(output->elements,output->loads_list[i],output->nodes_list[i],output->vertices,output->materials,output->parameters);
+		ConfigureObjectsx(output->elements,output->loads_list[i],output->nodes_list[i],output->vertices,output->materials,output->parameters,output->inputs2);
 	}
 
@@ -428,5 +436,5 @@
 
 	/*create datasets for all analyses*/
-	ModelProcessorx(&this->elements,&this->nodes_list,&this->vertices,&this->materials,&this->constraints_list,&this->loads_list,&this->parameters,iomodel,toolkitsoptionsfid,rootpath,this->solution_type,this->nummodels,this->analysis_type_list);
+	ModelProcessorx(&this->elements,&this->nodes_list,&this->vertices,&this->materials,&this->constraints_list,&this->loads_list,&this->parameters,&this->inputs2,iomodel,toolkitsoptionsfid,rootpath,this->solution_type,this->nummodels,this->analysis_type_list);
 
 	/*do the post-processing of the datasets to get an FemModel that can actually run analyses: */
@@ -437,5 +445,5 @@
 
 		if(VerboseMProcessor()) _printf0_("      configuring element and loads\n");
-		ConfigureObjectsx(this->elements,this->loads,this->nodes,this->vertices,this->materials,this->parameters);
+		ConfigureObjectsx(this->elements,this->loads,this->nodes,this->vertices,this->materials,this->parameters,this->inputs2);
 
 		if(i==0){
@@ -462,4 +470,5 @@
 		delete this->materials;
 		delete this->parameters;
+		delete this->inputs2;
 		if(this->constraints_list && this->nummodels){
 			for(i=0;i<this->nummodels;i++) delete this->constraints_list[i];
@@ -481,4 +490,5 @@
 		this->materials   = new Materials();
 		this->parameters  = new Parameters();
+		this->inputs2     = new Inputs2();
 		this->results     = new Results();
 		this->nodes       = new Nodes();
@@ -496,4 +506,5 @@
 	this->materials->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
 	this->parameters->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+	this->inputs2->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
 	this->results->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
 	this->vertices->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
@@ -527,5 +538,5 @@
 			SpcNodesx(this->nodes_list[i],this->constraints_list[i],this->parameters);
 			NodesDofx(this->nodes_list[i],this->parameters);
-			ConfigureObjectsx(this->elements,this->loads_list[i],this->nodes_list[i],this->vertices,this->materials,this->parameters);
+			ConfigureObjectsx(this->elements,this->loads_list[i],this->nodes_list[i],this->vertices,this->materials,this->parameters,this->inputs2);
 		}
 
@@ -942,5 +953,4 @@
 		#endif
 		_printf0_("\n");
-
 	}
 }
@@ -968,11 +978,11 @@
 		/* Get node coordinates*/
 		element->GetVerticesCoordinates(&xyz_list);
-		Input* weights_input                   = element->GetInput(InversionCostFunctionsCoefficientsEnum);   _assert_(weights_input);
-		Input* thickness_input                 = element->GetInput(ThicknessEnum); _assert_(thickness_input);
-		Input* vx_input                        = element->GetInput(VxEnum);                                  _assert_(vx_input);
-		Input* vy_input                        = element->GetInput(VyEnum);                                  _assert_(vy_input);
-		Input* surface_mass_balance_input      = element->GetInput(SmbMassBalanceEnum);          _assert_(surface_mass_balance_input);
-		Input* groundedice_melting_input       = element->GetInput(BasalforcingsGroundediceMeltingRateEnum); _assert_(groundedice_melting_input);
-		Input* dhdt_input                      = element->GetInput(BalancethicknessThickeningRateEnum);      _assert_(dhdt_input);
+		DatasetInput2* weights_input                   = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum);   _assert_(weights_input);
+		Input2* thickness_input                 = element->GetInput2(ThicknessEnum); _assert_(thickness_input);
+		Input2* vx_input                        = element->GetInput2(VxEnum);                                  _assert_(vx_input);
+		Input2* vy_input                        = element->GetInput2(VyEnum);                                  _assert_(vy_input);
+		Input2* surface_mass_balance_input      = element->GetInput2(SmbMassBalanceEnum);          _assert_(surface_mass_balance_input);
+		Input2* groundedice_melting_input       = element->GetInput2(BasalforcingsGroundediceMeltingRateEnum); _assert_(groundedice_melting_input);
+		Input2* dhdt_input                      = element->GetInput2(BalancethicknessThickeningRateEnum);      _assert_(dhdt_input);
 
 		/* Start  looping on the number of gaussian points: */
@@ -1534,5 +1544,5 @@
 void FemModel::IcefrontAreax(){/*{{{*/
 
-	int numvertices      = this->GetElementsWidth();
+	int numvertices      = 6;
 	int numbasins;
 	IssmDouble* BasinId   = xNew<IssmDouble>(numvertices);
@@ -1597,5 +1607,33 @@
 
 }/*}}}*/
+void FemModel::InputMakeDiscontinuous(int enum_in){/*{{{*/
+
+	int numvertices  = 6;
+	IssmDouble* P1DGlist = xNew<IssmDouble>(numvertices);
+
+	for(int i=0;i<this->elements->Size();i++){
+		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
+		element->GetInputListOnVertices(P1DGlist,enum_in);
+		element->AddInput2(DummyEnum,P1DGlist,P1DGEnum);
+	}
+	xDelete<IssmDouble>(P1DGlist);
+
+	this->inputs2->ChangeEnum(DummyEnum,enum_in);
+	this->inputs2->DeleteInput(DummyEnum);
+
+}/*}}}*/
 void FemModel::GroundinglineMassFluxx(IssmDouble* pM, bool scaled){/*{{{*/
+
+	/*First we need to depth average the velocities*/
+	int domaintype;
+	this->parameters->FindParam(&domaintype,DomainTypeEnum);
+	this->parameters->SetParam(VxEnum,InputToDepthaverageInEnum);
+	this->parameters->SetParam(VxAverageEnum,InputToDepthaverageOutEnum);
+	depthaverage_core(this);
+	if(domaintype!=Domain2DverticalEnum){
+		this->parameters->SetParam(VyEnum,InputToDepthaverageInEnum);
+		this->parameters->SetParam(VyAverageEnum,InputToDepthaverageOutEnum);
+		depthaverage_core(this);
+	}
 
 	IssmDouble local_mass_flux = 0;
@@ -1730,5 +1768,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_maxabsvx=element->inputs->MaxAbs(VxEnum);
+		Input2*  input = element->GetInput2(VxEnum);
+		element_maxabsvx=input->GetInputMaxAbs();
 		if(element_maxabsvx>maxabsvx) maxabsvx=element_maxabsvx;
 	}
@@ -1754,5 +1793,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_maxabsvy=element->inputs->MaxAbs(VyEnum);
+		Input2*  input = element->GetInput2(VyEnum);
+		element_maxabsvy=input->GetInputMaxAbs();
 		if(element_maxabsvy>maxabsvy) maxabsvy=element_maxabsvy;
 	}
@@ -1778,5 +1818,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_maxabsvz=element->inputs->MaxAbs(VzEnum);
+		Input2*  input = element->GetInput2(VzEnum);
+		element_maxabsvz=input->GetInputMaxAbs();
 		if(element_maxabsvz>maxabsvz) maxabsvz=element_maxabsvz;
 	}
@@ -1821,5 +1862,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_maxvel = element->inputs->Max(VelEnum);
+		Input2* vel_input = element->GetInput2(VelEnum); _assert_(vel_input);
+		element_maxvel = vel_input->GetInputMax();
 		if(element_maxvel>maxvel) maxvel=element_maxvel;
 	}
@@ -1845,5 +1887,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_maxvx = element->inputs->Max(VxEnum);
+		Input2* vx_input = element->GetInput2(VxEnum); _assert_(vx_input);
+		element_maxvx = vx_input->GetInputMax();
 		if(element_maxvx>maxvx) maxvx=element_maxvx;
 	}
@@ -1869,5 +1912,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_maxvy = element->inputs->Max(VyEnum);
+		Input2* vy_input = element->GetInput2(VyEnum); _assert_(vy_input);
+		element_maxvy = vy_input->GetInputMax();
 		if(element_maxvy>maxvy) maxvy=element_maxvy;
 	}
@@ -1893,5 +1937,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_maxvz = element->inputs->Max(VzEnum);
+		Input2* vz_input = element->GetInput2(VzEnum); _assert_(vz_input);
+		element_maxvz = vz_input->GetInputMax();
 		if(element_maxvz>maxvz) maxvz=element_maxvz;
 	}
@@ -1917,5 +1962,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_minvel = element->inputs->Min(VelEnum);
+		Input2*  input = element->GetInput2(VelEnum);
+		element_minvel =input->GetInputMin();
 		if(element_minvel<minvel) minvel=element_minvel;
 	}
@@ -1941,5 +1987,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_minvx = element->inputs->Min(VxEnum);
+		Input2*  input = element->GetInput2(VxEnum);
+		element_minvx =input->GetInputMin();
 		if(element_minvx<minvx) minvx=element_minvx;
 	}
@@ -1965,5 +2012,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_minvy = element->inputs->Min(VyEnum);
+		Input2*  input = element->GetInput2(VyEnum);
+		element_minvy =input->GetInputMin();
 		if(element_minvy<minvy) minvy=element_minvy;
 	}
@@ -1989,5 +2037,6 @@
 	for(i=0;i<this->elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
-		element_minvz = element->inputs->Min(VzEnum);
+		Input2*  input = element->GetInput2(VzEnum);
+		element_minvz =input->GetInputMin();
 		if(element_minvz<minvz) minvz=element_minvz;
 	}
@@ -2024,6 +2073,6 @@
 
 		/*Retrieve all inputs we will be needing: */
-		Input* weights_input   =element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-		Input* omega_input =element->GetInput(BalancethicknessOmegaEnum);                   _assert_(omega_input);
+		DatasetInput2* weights_input = element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+		Input2* omega_input   = element->GetInput2(BalancethicknessOmegaEnum);              _assert_(omega_input);
 
 		/* Start  looping on the number of gaussian points: */
@@ -2081,7 +2130,7 @@
 
 		/*Retrieve all inputs we will be needing: */
-		Input* weights_input =element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-		Input* omega_input   =element->GetInput(BalancethicknessOmegaEnum);              _assert_(omega_input);
-		Input* omega0_input  =element->GetInput(BalancethicknessOmega0Enum);             _assert_(omega0_input);
+		DatasetInput2* weights_input =element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+		Input2* omega_input   =element->GetInput2(BalancethicknessOmegaEnum);              _assert_(omega_input);
+		Input2* omega0_input  =element->GetInput2(BalancethicknessOmega0Enum);             _assert_(omega0_input);
 
 		/* Start  looping on the number of gaussian points: */
@@ -2356,4 +2405,11 @@
 					default:
 
+					/*Some preliminary calculation may be required (use similar syntax for other inputs)*/
+						if(output_enum==NewDamageEnum){
+							InputDuplicatex(this,DamageDEnum,DamageDOldEnum);
+							InputDuplicatex(this,DamageDbarEnum,DamageDbarOldEnum);
+							this->ElementOperationx(&Element::ComputeNewDamage);
+						}
+
 						/*Vector layout*/
 						if(!IsInputEnum(output_enum)) _error_("Cannot output \""<<EnumToStringx(output_enum)<<"\" because it is not an input");
@@ -2366,5 +2422,5 @@
 							Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(j));
 							element->ResultInterpolation(&rank_interpolation,&rank_nodesperelement,&rank_arraysize,output_enum);
-							if (rank_arraysize>max_rank_arraysize)max_rank_arraysize=rank_arraysize;
+							if(rank_arraysize>max_rank_arraysize)max_rank_arraysize=rank_arraysize;
 						}
 						rank_arraysize=max_rank_arraysize;
@@ -2440,5 +2496,5 @@
 								for(int j=0;j<elements->Size();j++){
 									Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(j));
-									element->ResultToMatrix(values,ncols, output_enum);
+									element->ResultToMatrix(values,ncols,output_enum);
 								}
 								/*Gather from all cpus*/
@@ -2462,4 +2518,8 @@
 		}
 	}
+
+	/*Clean up*/
+	for(int i=0;i<numonnodes;i++) xDelete<char>(resultsonnodes[i]);
+	xDelete<char*>(resultsonnodes);
 
 	/*Assign pointer and clean up*/
@@ -2620,7 +2680,7 @@
 
 		 /*Retrieve all inputs we will be needing: */
-		 Input* weights_input   =element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-		 Input* surface_input   =element->GetInput(SurfaceEnum);                            _assert_(surface_input);
-		 Input* surfaceobs_input=element->GetInput(InversionSurfaceObsEnum);                _assert_(surfaceobs_input);
+		 DatasetInput2* weights_input   =element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+		 Input2* surface_input   =element->GetInput2(SurfaceEnum);                            _assert_(surface_input);
+		 Input2* surfaceobs_input=element->GetInput2(InversionSurfaceObsEnum);                _assert_(surfaceobs_input);
 
 		 /* Start  looping on the number of gaussian points: */
@@ -2676,6 +2736,6 @@
 
 		/*Retrieve all inputs we will be needing: */
-		Input* weights_input   =element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-		Input* thickness_input =element->GetInput(ThicknessEnum);                          _assert_(thickness_input);
+		DatasetInput2* weights_input   =element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+		Input2* thickness_input =element->GetInput2(ThicknessEnum);                          _assert_(thickness_input);
 
 		/* Start  looping on the number of gaussian points: */
@@ -2710,4 +2770,67 @@
 }
 /*}}}*/
+void FemModel::ThicknessAverage(){/*{{{*/
+
+	int elementswidth                   = this->GetElementsWidth();//just 2D mesh, tria elements
+   int numberofvertices                = this->vertices->NumberOfVertices();//total number of vertices
+
+   IssmDouble weight                   = 0.;
+   IssmDouble* totalweight             = NULL;
+	IssmDouble* Hserial						= NULL;
+   IssmDouble* H                       = xNew<IssmDouble>(elementswidth);
+   Vector<IssmDouble>* vecH				= new Vector<IssmDouble>(numberofvertices);
+   Vector<IssmDouble>* vectotalweight  = new Vector<IssmDouble>(numberofvertices);
+
+   for(int i=0;i<this->elements->Size();i++){
+      Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
+
+		/*check if there is ice in this element*/
+		if(!element->IsIceInElement()) continue;
+
+		/*get H on the vertices*/
+		element->GetInputListOnVertices(H,ThicknessEnum);
+
+      /*weight to calculate the smoothed H*/
+      weight=1.;//simple average
+
+		/*add in the serial vector*/
+      vecH->SetValue(element->vertices[0]->Sid(),weight*H[0],ADD_VAL);
+      vecH->SetValue(element->vertices[1]->Sid(),weight*H[1],ADD_VAL);
+      vecH->SetValue(element->vertices[2]->Sid(),weight*H[2],ADD_VAL);
+      /*total weight*/
+      vectotalweight->SetValue(element->vertices[0]->Sid(),weight,ADD_VAL);
+      vectotalweight->SetValue(element->vertices[1]->Sid(),weight,ADD_VAL);
+      vectotalweight->SetValue(element->vertices[2]->Sid(),weight,ADD_VAL);
+   }
+
+   /*Assemble and serialize*/
+   vecH->Assemble();
+   vectotalweight->Assemble();
+   Hserial=vecH->ToMPISerial();
+   totalweight=vectotalweight->ToMPISerial();
+
+   /*Divide by the total weight*/
+   for(int i=0;i<numberofvertices;i++){
+      _assert_(totalweight[i]>0);
+      Hserial[i]=Hserial[i]/totalweight[i];
+   }
+
+   /*Set element inputs*/
+   for(int i=0;i<this->elements->Size();i++){
+      Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(i));
+		H[0]=Hserial[element->vertices[0]->Sid()];
+		H[1]=Hserial[element->vertices[1]->Sid()];
+		H[2]=Hserial[element->vertices[2]->Sid()];
+		element->AddInput2(ThicknessEnum,H,P1Enum);
+	}
+
+ 	/*Cleanup*/
+   delete vecH;
+   delete vectotalweight;
+   xDelete<IssmDouble>(H);
+   xDelete<IssmDouble>(Hserial);
+   xDelete<IssmDouble>(totalweight);
+}
+/*}}}*/
 void FemModel::ThicknessPositivex(IssmDouble* pJ){/*{{{*/
 
@@ -2732,6 +2855,6 @@
 
 		/*Retrieve all inputs we will be needing: */
-		Input* weights_input   =element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-		Input* thickness_input =element->GetInput(ThicknessEnum);                          _assert_(thickness_input);
+		DatasetInput2* weights_input   =element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+		Input2* thickness_input =element->GetInput2(ThicknessEnum);                          _assert_(thickness_input);
 
 		/* Start  looping on the number of gaussian points: */
@@ -3041,4 +3164,7 @@
 	}
 
+	/*Creating inputs*/
+	Inputs2* new_inputs2=new Inputs2(newnumberofelements,newnumberofvertices);
+
 	/*Creating materials*/
 	Materials* new_materials=new Materials();
@@ -3104,16 +3230,17 @@
 		}
 
-		ConfigureObjectsx(new_elements,this->loads,new_nodes_list[i],new_vertices,new_materials,this->parameters);
+		ConfigureObjectsx(new_elements,this->loads,new_nodes_list[i],new_vertices,new_materials,this->parameters,new_inputs2);
 		SpcNodesx(new_nodes_list[i],new_constraints_list[i],this->parameters);
 		NodesDofx(new_nodes_list[i],this->parameters);
 	}
 
-	/*Finally: interpolate all inputs and insert them into the new elements.*/
-	this->InterpolateInputs(new_vertices,new_elements);
+	/*Interpolate all inputs and insert them into the new elements.*/
+	this->InterpolateInputs(new_vertices,new_elements,new_inputs2);
 
 	/*Delete old structure and set new pointers*/
-	delete this->vertices;		this->vertices		= new_vertices;
-	delete this->elements;		this->elements		= new_elements;
-	delete this->materials;		this->materials	= new_materials;
+	delete this->inputs2;   this->inputs2 = new_inputs2;
+	delete this->vertices;  this->vertices = new_vertices;
+	delete this->elements;  this->elements = new_elements;
+	delete this->materials; this->materials = new_materials;
 	if(this->constraints_list && this->nummodels){
 		for(int i=0;i<this->nummodels;i++) delete this->constraints_list[i];
@@ -3127,4 +3254,5 @@
 	this->nodes_list = new_nodes_list;
 
+	/*Reset mask*/
 	GetMaskOfIceVerticesLSMx0(this);
 
@@ -3174,5 +3302,5 @@
 		}
 		/*insert new bedrock*/
-		element->AddInput(BedEnum,&r[0],P1Enum);
+		element->AddInput2(BedEnum,&r[0],P1Enum);
 		/*Cleanup*/
 		xDelete<IssmDouble>(xyz_list);
@@ -3222,7 +3350,7 @@
 
 		/*Update inputs*/
-		element->AddInput(MaskGroundediceLevelsetEnum,&phi[0],P1Enum);
-		element->AddInput(ThicknessEnum,&h[0],P1Enum);
-		element->AddInput(BaseEnum,&b[0],P1Enum);
+		element->AddInput2(MaskGroundediceLevelsetEnum,&phi[0],P1Enum);
+		element->AddInput2(ThicknessEnum,&h[0],P1Enum);
+		element->AddInput2(BaseEnum,&b[0],P1Enum);
 	}
 
@@ -3238,33 +3366,23 @@
 void FemModel::GetInputs(int* pnumP0inputs,IssmDouble** pP0inputs,int** pP0input_enums,int** pP0input_interp,int* pnumP1inputs,IssmDouble** pP1inputs,int** pP1input_enums,int** pP1input_interp){/*{{{*/
 
-	int maxinputs										= MaximumNumberOfDefinitionsEnum;
-	int numberofvertices								= this->vertices->NumberOfVertices();
-	int numberofelements								= this->elements->NumberOfElements();
-	int elementswidth									= this->GetElementsWidth();
-	int numP0inputs									= -1;
+	int numberofvertices = this->vertices->NumberOfVertices();
+	int numberofelements = this->elements->NumberOfElements();
+	int elementswidth    = this->GetElementsWidth();
+	int numinputs,numP0inputs,numP1inputs;
 	IssmDouble* P0inputs								= NULL;
 	Vector<IssmDouble>* vP0inputs					= NULL;
 	int* P0input_enums								= NULL;
 	int* P0input_interp 								= NULL;
-	int numP1inputs									= -1;
 	IssmDouble* P1inputs								= NULL;
 	Vector<IssmDouble>* vP1inputs					= NULL;
 	int* P1input_enums  								= NULL;
 	int* P1input_interp 								= NULL;
-	Vector<IssmDouble>* input_interpolations	= NULL;
-	IssmDouble* input_interpolations_serial	= NULL;
+	int* input_interpolations                 = NULL;
+	int* input_enums                          = NULL;
    int* pos												= NULL;
 	IssmDouble value									= 0;
 
 	/*Figure out how many inputs we have and their respective interpolation*/
-	input_interpolations=new Vector<IssmDouble>(maxinputs);
-	if(this->elements->Size()){
-		Element* element=xDynamicCast<Element*>(this->elements->GetObjectByOffset(0));
-		element->GetInputsInterpolations(input_interpolations);
-	}
-
-	/*Assemble and serialize*/
-	input_interpolations->Assemble();
-	input_interpolations_serial = input_interpolations->ToMPISerial();
+	this->inputs2->GetInputsInterpolations(&numinputs,&input_interpolations,&input_enums);
 
 	/*Count and get enums of all inputs in old mesh*/
@@ -3278,6 +3396,6 @@
 		numP0inputs = 0;
 		numP1inputs = 0;
-		for(int i=0;i<maxinputs;i++){
-			int inputinterp = reCast<int>(input_interpolations_serial[i]);
+		for(int i=0;i<numinputs;i++){
+			int inputinterp = input_interpolations[i];
 			switch(inputinterp){
 				case 0:
@@ -3286,5 +3404,5 @@
 				case P1Enum:
 					if(step){
-						P1input_enums[numP1inputs]  = i;
+						P1input_enums[numP1inputs]  = input_enums[i];
 						P1input_interp[numP1inputs] = inputinterp;
 					}
@@ -3292,9 +3410,8 @@
 					break;
 				case P0Enum:
-				case DoubleInputEnum:
-				case IntInputEnum:
-				case BoolInputEnum:
+				case IntInput2Enum:
+				case BoolInput2Enum:
 					if(step){
-						P0input_enums[numP0inputs]  = i;
+						P0input_enums[numP0inputs]  = input_enums[i];
 						P0input_interp[numP0inputs] = inputinterp;
 					}
@@ -3302,5 +3419,5 @@
 					break;
 				default:
-					_error_(EnumToStringx(inputinterp)<<" Not supported yet");
+					_error_(EnumToStringx(inputinterp)<<" ("<<inputinterp<<") Not supported yet");
 			}
 		}
@@ -3316,6 +3433,25 @@
 		/*Get P0 inputs*/
 		for(int j=0;j<numP0inputs;j++){
-			TriaInput* input=xDynamicCast<TriaInput*>(element->GetInput(P0input_enums[j]));
-			input->GetInputAverage(&value);
+			switch(P0input_interp[j]){
+				case P0Enum:{
+					Input2* input=element->GetInput2(P0input_enums[j]);
+					input->GetInputAverage(&value);
+				}
+							 break;
+				case IntInput2Enum:{
+					int valueint;
+					element->GetInput2Value(&valueint,P0input_enums[j]);
+					value = reCast<IssmDouble>(valueint);
+				}
+									 break;
+				case BoolInput2Enum:{
+					bool valuebool;
+					element->GetInput2Value(&valuebool,P0input_enums[j]);
+					value = reCast<IssmDouble>(valuebool);
+				}
+					break;
+				default:
+					_error_(EnumToStringx(P0input_interp[j])<<" ("<<P0input_interp[j]<<") Not supported yet");
+			}
 			pos[0]=element->Sid()*numP0inputs+j;
 			/*Insert input in the vector*/
@@ -3325,10 +3461,11 @@
 		/*Get P1 inputs*/
 		for(int j=0;j<numP1inputs;j++){
-			TriaInput* input=xDynamicCast<TriaInput*>(element->GetInput(P1input_enums[j]));
+			Input2* temp = element->GetInput2(P1input_enums[j]); _assert_(temp);
+			ElementInput2* input=xDynamicCast<ElementInput2*>(temp);
 			pos[0]=element->vertices[0]->Sid()*numP1inputs+j;
 			pos[1]=element->vertices[1]->Sid()*numP1inputs+j;
 			pos[2]=element->vertices[2]->Sid()*numP1inputs+j;
 			/*Insert input in the vector*/
-			vP1inputs->SetValues(elementswidth,pos,input->values,INS_VAL);
+			vP1inputs->SetValues(elementswidth,pos,input->element_values,INS_VAL);
 		}
 	}
@@ -3351,12 +3488,12 @@
 
 	/*Cleanup*/
-	delete input_interpolations;
 	delete vP0inputs;
 	delete vP1inputs;
-	xDelete<IssmDouble>(input_interpolations_serial);
+	xDelete<int>(input_interpolations);
+	xDelete<int>(input_enums);
 	xDelete<int>(pos);
 }
 /*}}}*/
-void FemModel::InterpolateInputs(Vertices* newfemmodel_vertices,Elements* newfemmodel_elements){/*{{{*/
+void FemModel::InterpolateInputs(Vertices* newfemmodel_vertices,Elements* newfemmodel_elements,Inputs2* newinputs2){/*{{{*/
 
 	int numberofelements			= -1;												//global, entire old mesh
@@ -3419,4 +3556,5 @@
 
 	/*Insert P0 and P1 inputs into the new elements (just on the new partition)*/
+	int vertexlids[3];
 	values=xNew<IssmDouble>(elementswidth);
 	for(int i=0;i<newfemmodel_elements->Size();i++){//just on the new partition
@@ -3426,25 +3564,26 @@
 			switch(P0input_interp[j]){
 				case P0Enum:
-				case DoubleInputEnum:
-					element->AddInput(new DoubleInput(P0input_enums[j],newP0inputs[i*numP0inputs+j]));
+					element->SetElementInput(newinputs2,P0input_enums[j],newP0inputs[i*numP0inputs+j]);
 					break;
-				case IntInputEnum:
-					element->AddInput(new IntInput(P0input_enums[j],reCast<int>(newP0inputs[i*numP0inputs+j])));
+				case IntInput2Enum:
+					element->SetIntInput(newinputs2,P0input_enums[j],reCast<int>(newP0inputs[i*numP0inputs+j]));
 					break;
-				case BoolInputEnum:
-					element->AddInput(new BoolInput(P0input_enums[j],reCast<bool>(newP0inputs[i*numP0inputs+j])));
+				case BoolInput2Enum:
+					element->SetBoolInput(newinputs2,P0input_enums[j],reCast<bool>(newP0inputs[i*numP0inputs+j]));
 					break;
 				default:
-					_error_(EnumToStringx(P0input_enums[j])<<" Not supported yet");
+					_error_(EnumToStringx(P0input_interp[j])<<" Not supported yet");
 			}
 		}
 		/*newP1inputs is just on the new partition*/
+		for(int i=0;i<3;i++) vertexlids[i]=element->vertices[i]->lid;
 		for(int j=0;j<numP1inputs;j++){
 			values[0]=newP1inputs[sidtoindex[element->vertices[0]->Sid()]*numP1inputs+j];
 			values[1]=newP1inputs[sidtoindex[element->vertices[1]->Sid()]*numP1inputs+j];
 			values[2]=newP1inputs[sidtoindex[element->vertices[2]->Sid()]*numP1inputs+j];
-			element->inputs->AddInput(new TriaInput(P1input_enums[j],values,P1Enum));
-		}
-	}
+			newinputs2->SetTriaInput(P1input_enums[j],P1Enum,3,vertexlids,values);
+		}
+	}
+
 
 	/*Cleanup*/
@@ -3566,5 +3705,5 @@
 
 	/*newlementslist is in Matlab indexing*/
-
+	int lid=0;
 	for(int i=0;i<newnumberofelements;i++){
 		if(my_elements[i]){
@@ -3573,6 +3712,10 @@
 			newtria->id=i+1;
 			newtria->sid=i;
+			newtria->lid=lid++;
+			newtria->iscollapsed=0;
+			newtria->isonsurface = true;
+			newtria->isonbase = true;
 			newtria->parameters=NULL;
-			newtria->inputs=new Inputs();
+			newtria->inputs2=NULL;
 			newtria->nodes=NULL;
 			newtria->vertices=NULL;
@@ -5088,22 +5231,16 @@
 }
 /*}}}*/
-void FemModel::InitTransientOutputx(int* transientinput_enum,int numoutputs){ /*{{{*/
+void FemModel::InitTransientInputx(int* transientinput_enum,int numoutputs){ /*{{{*/
 
 	for(int i=0;i<numoutputs;i++){
-		if(transientinput_enum[i]<0){
-			_error_("Can't deal with non enum fields for result Stack");
-		}
-		else{
-			for(int j=0;j<elements->Size();j++){
-				/*Intermediaries*/
-				TransientInput* transient_input = new TransientInput(transientinput_enum[i]);
-				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(j));
-				element->inputs->AddInput(transient_input);
-			}
-		}
-	}
-}
-/*}}}*/
-void FemModel::StackTransientOutputx(int* input_enum,int* transientinput_enum,IssmDouble subtime,int numoutputs){ /*{{{*/
+		this->inputs2->DeleteInput(transientinput_enum[i]);
+		this->inputs2->SetTransientInput(transientinput_enum[i],NULL,0);
+		/*We need to configure this input!*/
+		TransientInput2* transientinput = this->inputs2->GetTransientInput(transientinput_enum[i]); _assert_(transientinput);
+		transientinput->Configure(this->parameters);
+	}
+}
+/*}}}*/
+void FemModel::StackTransientInputx(int* input_enum,int* transientinput_enum,IssmDouble subtime,int numoutputs){ /*{{{*/
 
   for(int i=0;i<numoutputs;i++){
@@ -5113,59 +5250,38 @@
 		else{
 			for(int j=0;j<elements->Size();j++){
-				/*Intermediaries*/
+
+				/*Get the right transient input*/
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(j));
-				Input* input=element->inputs->GetInput(transientinput_enum[i]); _assert_(input); //this is the enum stack
-				TransientInput* stacking_input=xDynamicCast<TransientInput*>(input);
-
-				int  numvertices = element->GetNumberOfVertices();
-				IssmDouble* N=xNew<IssmDouble>(numvertices);
-				element->GetInputListOnVertices(&N[0],input_enum[i]);   //this is the enum to stack
+				TransientInput2* transientinput = this->inputs2->GetTransientInput(transientinput_enum[i]);
+
+				/*Get values and lid list*/
+				const int   numvertices = element->GetNumberOfVertices();
+				IssmDouble* values=xNew<IssmDouble>(numvertices);
+				int        *vertexlids = xNew<int>(numvertices);
+				element->GetInputListOnVertices(&values[0],input_enum[i]);   //this is the enum to stack
+
+				element->GetVerticesLidList(vertexlids);
+
 				switch(element->ObjectEnum()){
-				case TriaEnum:
-					stacking_input->AddTimeInput(new TriaInput(transientinput_enum[i],&N[0],P1Enum),subtime);
-					break;
-				case PentaEnum:
-					stacking_input->AddTimeInput(new PentaInput(transientinput_enum[i],&N[0],P1Enum),subtime);
-					break;
-				case TetraEnum:
-					stacking_input->AddTimeInput(new TetraInput(transientinput_enum[i],&N[0],P1Enum),subtime);
-					break;
-				default: _error_("Not implemented yet");
+					case TriaEnum:  transientinput->AddTriaTimeInput(subtime,numvertices,vertexlids,values,P1Enum); break;
+					case PentaEnum: transientinput->AddPentaTimeInput(subtime,numvertices,vertexlids,values,P1Enum); break;
+					default: _error_("Not implemented yet");
 				}
-				stacking_input->Configure(parameters);
-				xDelete<IssmDouble>(N);
-			}
-		}
-	}
-}
-/*}}}*/
-void FemModel::AverageTransientOutputx(int* transientinput_enum,int* averagedinput_enum,IssmDouble init_time,IssmDouble end_time,int numoutputs){ /*{{{*/
+				xDelete<IssmDouble>(values);
+				xDelete<int>(vertexlids);
+			}
+		}
+	}
+}
+/*}}}*/
+void FemModel::AverageTransientInputx(int* transientinput_enum,int* averagedinput_enum,IssmDouble init_time,IssmDouble end_time,int numoutputs){ /*{{{*/
 
 	for(int i=0;i<numoutputs;i++){
-		if(transientinput_enum[i]<0){
-			_error_("Can't deal with non enum fields for result Stack");
-		}
-		else{
-			for(int j=0;j<this->elements->Size();j++){
-				Element*    element       = xDynamicCast<Element*>(elements->GetObjectByOffset(j));
-				int         numnodes      = element->GetNumberOfNodes();
-				IssmDouble* time_averaged = xNew<IssmDouble>(numnodes);
-				Gauss*      gauss         = element->NewGauss();
-
-				Input*      input         = element->GetInput(transientinput_enum[i]); _assert_(input);
-				TransientInput* transient_input=xDynamicCast<TransientInput*>(input);
-
-				for(int iv=0;iv<numnodes;iv++){
-					gauss->GaussNode(element->FiniteElement(),iv);
-					transient_input->GetInputAverageOverTimeSlice(&time_averaged[iv],gauss,init_time,end_time);
-				}
-				element->AddInput(averagedinput_enum[i],&time_averaged[0],element->GetElementType());
-				delete gauss;
-				xDelete<IssmDouble>(time_averaged);
-			}
-		}
-	}
-}
-/*}}}*/
+		for(int j=0;j<this->elements->Size();j++){
+			Element* element = xDynamicCast<Element*>(elements->GetObjectByOffset(j));
+			element->CreateInputTimeAverage(transientinput_enum[i],averagedinput_enum[i],init_time,end_time);
+		}
+	}
+}/*}}}*/
 #ifdef _HAVE_JAVASCRIPT_
 FemModel::FemModel(IssmDouble* buffer, int buffersize, char* toolkits, char* solution, char* modelname,ISSM_MPI_Comm incomm, bool trace){ /*{{{*/
@@ -5300,8 +5416,8 @@
 		for(int i=0;i<numberofvertices;i++) hmaxvertices_serial[i]=NAN;
 		/*Fill hmaxvertices*/
+		if(this->amrbamg->thicknesserror_threshold>0)	this->GethmaxVerticesFromEstimators(hmaxvertices_serial,ThicknessErrorEstimatorEnum);
+		if(this->amrbamg->deviatoricerror_threshold>0)	this->GethmaxVerticesFromEstimators(hmaxvertices_serial,DeviatoricStressErrorEstimatorEnum);
 		if(this->amrbamg->groundingline_distance>0)		this->GethmaxVerticesFromZeroLevelSetDistance(hmaxvertices_serial,MaskGroundediceLevelsetEnum);
 		if(this->amrbamg->icefront_distance>0)				this->GethmaxVerticesFromZeroLevelSetDistance(hmaxvertices_serial,MaskIceLevelsetEnum);
-		if(this->amrbamg->thicknesserror_threshold>0)	this->GethmaxVerticesFromEstimators(hmaxvertices_serial,ThicknessErrorEstimatorEnum);
-		if(this->amrbamg->deviatoricerror_threshold>0)	this->GethmaxVerticesFromEstimators(hmaxvertices_serial,DeviatoricStressErrorEstimatorEnum);
 	}
 
@@ -5484,4 +5600,5 @@
 	bool refine;
 
+
 	/*Fill variables*/
 	switch(errorestimator_type){
@@ -5505,4 +5622,11 @@
 	if(groupthreshold<DBL_EPSILON) _error_("group threshold is too small!");
 
+	/*Get mesh*/
+	this->GetMesh(&index,&x,&y,&numberofvertices,&numberofelements);
+	if(numberofelements<0) _error_("number of elements is negative!\n");
+	if(numberofvertices<0) _error_("number of vertices is negative!\n");
+	maxlength		= xNew<IssmDouble>(numberofelements);
+	error_vertices	= xNewZeroInit<IssmDouble>(numberofvertices);
+
 	/*Find the max of the estimators if it was not provided*/
 	if(maxerror<DBL_EPSILON){
@@ -5513,9 +5637,4 @@
    	}
 	}
-
-	/*Get mesh*/
-	this->GetMesh(&index,&x,&y,&numberofvertices,&numberofelements);
-	maxlength		= xNew<IssmDouble>(numberofelements);
-	error_vertices	= xNewZeroInit<IssmDouble>(numberofvertices);
 
 	/*Fill error_vertices (this is the sum of all elements connected to the vertex)*/
Index: /issm/trunk/src/c/classes/FemModel.h
===================================================================
--- /issm/trunk/src/c/classes/FemModel.h	(revision 24685)
+++ /issm/trunk/src/c/classes/FemModel.h	(revision 24686)
@@ -11,4 +11,5 @@
 class DataSet;
 class Parameters;
+class Inputs2;
 class Nodes;
 class Vertices;
@@ -43,4 +44,5 @@
 		Materials   *materials;            //one set of materials, for each element
 		Parameters  *parameters;           //one set of parameters, independent of the analysis_type
+		Inputs2     *inputs2;              //one set of inputs, independent of the analysis_type
 		Results     *results;              //results that cannot be fit into the elements
 		Vertices    *vertices;             //one set of vertices
@@ -110,4 +112,5 @@
 		void IceVolumex(IssmDouble* pV, bool scaled);
 		void IceVolumeAboveFloatationx(IssmDouble* pV, bool scaled);
+		void InputMakeDiscontinuous(int enum_in);
 		void MassFluxx(IssmDouble* presponse);
 		void MaxAbsVxx(IssmDouble* presponse);
@@ -148,4 +151,5 @@
 		void OmegaAbsGradientx( IssmDouble* pJ);
 		void EtaDiffx( IssmDouble* pJ);
+		void ThicknessAverage();
 		void ThicknessAbsGradientx( IssmDouble* pJ);
 		void ThicknessPositivex(IssmDouble* pJ);
@@ -170,7 +174,7 @@
 		void UpdateConstraintsExtrudeFromTopx();
 		void UpdateConstraintsL2ProjectionEPLx(IssmDouble* pL2count);
-		void InitTransientOutputx(int* transientinput_enum,int numoutputs);
-		void StackTransientOutputx(int* input_enum,int* transientinput_enum,IssmDouble hydrotime,int numoutputs);
-		void AverageTransientOutputx(int* transientinput_enum,int* averagedinput_enum,IssmDouble init_time,IssmDouble end_time,int numoutputs);
+		void InitTransientInputx(int* transientinput_enum,int numoutputs);
+		void StackTransientInputx(int* input_enum,int* transientinput_enum,IssmDouble hydrotime,int numoutputs);
+		void AverageTransientInputx(int* transientinput_enum,int* averagedinput_enum,IssmDouble init_time,IssmDouble end_time,int numoutputs);
 		void UpdateConstraintsx(void);
 		int  UpdateVertexPositionsx(void);
@@ -196,5 +200,5 @@
 		void CreateConstraints(Vertices* newfemmodel_vertices,int analysis_enum,Constraints* newfemmodel_constraints);
 		void GetInputs(int* pnumP0inputs,IssmDouble** pP0inputs,int** pP0input_enums,int** pP0input_interp,int* pnumP1inputs,IssmDouble** pP1inputs,int** pP1input_enums,int** pP1input_interp);
-		void InterpolateInputs(Vertices* newfemmodel_vertices,Elements* newfemmodel_elements);
+		void InterpolateInputs(Vertices* newfemmodel_vertices,Elements* newfemmodel_elements,Inputs2* new_inputs);
 		void UpdateElements(int newnumberofelements,int* newelementslist,bool* my_elements,int analysis_counter,Elements* newelements);
 		void WriteMeshInResults(void);
Index: /issm/trunk/src/c/classes/Inputs2/ArrayInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/ArrayInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/ArrayInput2.cpp	(revision 24686)
@@ -0,0 +1,132 @@
+/*!\file ArrayInput2.c
+ * \brief: implementation of the ArrayInput2 object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "../classes.h"
+#include "../../shared/shared.h"
+#include "./ArrayInput2.h"
+
+/*ArrayInput2 constructors and destructor*/
+ArrayInput2::ArrayInput2(void){/*{{{*/
+
+	this->numberofelements_local = -1;
+	this->N                      = NULL;
+	this->values                 = NULL;
+
+}/*}}}*/
+ArrayInput2::ArrayInput2(int nbe_in){/*{{{*/
+
+	_assert_(nbe_in>0);
+	_assert_(nbe_in<1e11);
+	this->numberofelements_local = nbe_in;
+	this->N                      = xNewZeroInit<int>(this->numberofelements_local);
+	this->values                 = xNewZeroInit<IssmDouble*>(this->numberofelements_local);
+
+}/*}}}*/
+ArrayInput2::~ArrayInput2(){/*{{{*/
+	if(this->values){
+		for(int i=0;i<this->numberofelements_local;i++) if(this->values[i]) xDelete<IssmDouble>(this->values[i]);
+		xDelete<IssmDouble*>(this->values);
+	}
+	if(this->N) xDelete<int>(this->N);
+}
+/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* ArrayInput2::copy() {/*{{{*/
+
+	ArrayInput2* output = new ArrayInput2(this->numberofelements_local);
+
+	/*constructor above already allocated N and values: copy contents only (avoid leak)*/
+	xMemCpy<int>(output->N,this->N,this->numberofelements_local);
+
+	/*per-row arrays are deep-copied below into the constructor-allocated values*/
+	for(int i=0;i<this->numberofelements_local;i++){
+		if(this->values[i]){
+			_assert_(this->N[i]>0);
+			output->values[i] = xNew<IssmDouble>(this->N[i]);
+			xMemCpy<IssmDouble>(output->values[i],this->values[i],this->N[i]);
+		}
+		else{
+			output->values[i] = NULL;
+		}
+	}
+
+	return output;
+}
+/*}}}*/
+void ArrayInput2::DeepEcho(void){/*{{{*/
+	_printf_("ArrayInput2 Echo:\n");
+	///_printf_("   Size:          "<<N<<"\n");
+	//printarray(this->values,this->M,this->N);
+	//_printf_(setw(15)<<"   ArrayInput2 "<<setw(25)<<left<<EnumToStringx(this->enum_type)<<" "<<(value?"true":"false") << "\n");
+}
+/*}}}*/
+void ArrayInput2::Echo(void){/*{{{*/
+	this->DeepEcho();
+}
+/*}}}*/
+int  ArrayInput2::Id(void){/*{{{*/
+	return -1;
+}/*}}}*/
+void ArrayInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	MARSHALLING_ENUM(ArrayInput2Enum);
+	MARSHALLING(this->numberofelements_local);
+	if(this->numberofelements_local){
+		MARSHALLING_DYNAMIC(this->N,int,this->numberofelements_local);
+		for(int i=0;i<this->numberofelements_local;i++){
+			if(this->values[i]){
+				MARSHALLING_DYNAMIC(this->values[i],IssmDouble,this->N[i]);
+			}
+		}
+	}
+	else{
+		this->N      = NULL;
+		this->values = NULL;
+	}
+
+}
+/*}}}*/
+int  ArrayInput2::ObjectEnum(void){/*{{{*/
+	return ArrayInput2Enum;
+}
+/*}}}*/
+
+/*ArrayInput2 management*/
+void ArrayInput2::SetInput(int row,int numindices,IssmDouble* values_in){/*{{{*/
+
+	_assert_(this);
+	_assert_(row>=0 && row<this->numberofelements_local);
+
+	if(this->N[row] != numindices){
+		if(this->values[row]) xDelete<IssmDouble>(this->values[row]);
+		this->values[row] = xNew<IssmDouble>(numindices);
+	}
+
+	IssmDouble *el_values = this->values[row];
+	for(int i=0;i<numindices;i++) el_values[i] = values_in[i];
+
+	this->N[row] = numindices;
+}
+/*}}}*/
+void ArrayInput2::GetArray(int row,IssmDouble** pvalues,int* pN){/*{{{*/
+
+	_assert_(this);
+	_assert_(row>=0 && row<this->numberofelements_local);
+	if(pvalues){
+		IssmDouble* outvalues = xNew<IssmDouble>(this->N[row]);
+		xMemCpy<IssmDouble>(outvalues,this->values[row],this->N[row]);
+		*pvalues = outvalues;
+	}
+	if(pN){
+		*pN = this->N[row];
+	}
+}
+/*}}}*/
Index: /issm/trunk/src/c/classes/Inputs2/ArrayInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/ArrayInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/ArrayInput2.h	(revision 24686)
@@ -0,0 +1,33 @@
+#ifndef _ARRAYINPUT2_H_
+#define _ARRAYINPUT2_H_
+
+/*Headers:*/
+#include "./Input2.h"
+
+class ArrayInput2: public Input2{
+
+	private:
+		int         numberofelements_local;
+		int*        N;
+		IssmDouble** values;
+
+	public:
+		/*ArrayInput2 constructors, destructors: {{{*/
+		ArrayInput2();
+		ArrayInput2(int nbe_in);
+		~ArrayInput2();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Input2 *copy();
+		void    DeepEcho();
+		void    Echo();
+		int     Id();
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		/*}}}*/
+		/*ArrayInput2 management:*/
+		void SetInput(int row,int numinds,IssmDouble* values_in);
+		void GetArray(int row,IssmDouble** pvalues,int* pN);
+
+};
+#endif  /* _ARRAYINPUT2_H_ */
Index: /issm/trunk/src/c/classes/Inputs2/BoolInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/BoolInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/BoolInput2.cpp	(revision 24686)
@@ -0,0 +1,93 @@
+/*!\file BoolInput2.c
+ * \brief: implementation of the BoolInput2 object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "../classes.h"
+#include "./BoolInput2.h"
+#include "../../shared/shared.h"
+
+/*BoolInput2 constructors and destructor*/
+BoolInput2::BoolInput2(){/*{{{*/
+	this->size   = -1;
+	this->values = NULL;
+}
+/*}}}*/
+BoolInput2::BoolInput2(int size_in){/*{{{*/
+	_assert_(size_in>0);
+	_assert_(size_in<1e11);
+	this->size   = size_in;
+	this->values = xNew<bool>(size_in);
+}
+/*}}}*/
+BoolInput2::~BoolInput2(){/*{{{*/
+	xDelete<bool>(this->values);
+}
+/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* BoolInput2::copy() {/*{{{*/
+
+	_assert_(this->size);
+	BoolInput2* output = new BoolInput2(this->size);
+	xMemCpy<bool>(output->values,this->values,this->size);
+
+	return output;
+
+}
+/*}}}*/
+void BoolInput2::DeepEcho(void){/*{{{*/
+
+	_printf_("BoolInput2 Echo:\n");
+	_printf_("   Size:          "<<size<<"\n");
+	printarray(this->values,this->size);
+}
+/*}}}*/
+void BoolInput2::Echo(void){/*{{{*/
+	this->DeepEcho();
+}
+/*}}}*/
+int  BoolInput2::Id(void){ return -1; }/*{{{*/
+/*}}}*/
+void BoolInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	MARSHALLING_ENUM(BoolInput2Enum);
+	MARSHALLING(this->size);
+	if(this->size > 0){
+		MARSHALLING_DYNAMIC(this->values,bool,this->size)
+	}
+	else this->values = NULL;
+
+}
+/*}}}*/
+int  BoolInput2::ObjectEnum(void){/*{{{*/
+
+	return BoolInput2Enum;
+
+}
+/*}}}*/
+
+/*BoolInput2 management*/
+void BoolInput2::GetInput(bool* pvalue,int index){/*{{{*/
+
+	_assert_(index>=0); 
+	_assert_(index<this->size); 
+
+	*pvalue = this->values[index];
+}
+/*}}}*/
+void BoolInput2::SetInput(int index,bool value){/*{{{*/
+
+	_assert_(index>=0); 
+	_assert_(index<this->size); 
+
+	this->values[index] = value;
+}
+/*}}}*/
+
+/*Object functions*/
Index: /issm/trunk/src/c/classes/Inputs2/BoolInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/BoolInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/BoolInput2.h	(revision 24686)
@@ -0,0 +1,35 @@
+#ifndef _BOOLINPUT2_H_
+#define _BOOLINPUT2_H_
+
+/*Headers:*/
+#include "./Input2.h"
+
+class BoolInput2: public Input2{
+
+	private:
+		int   size;
+		bool* values;
+
+	public:
+		/*BoolInput2 constructors, destructors: {{{*/
+		BoolInput2();
+		BoolInput2(int size_in);
+		~BoolInput2();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Input2 *copy();
+		void    DeepEcho();
+		void    Echo();
+		int     Id();
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		/*}}}*/
+		/*BoolInput2 management: {{{*/
+		void GetInput(bool* pvalue,int index);
+		void SetInput(int index,bool value);
+		/*}}}*/
+		/*numerics: {{{*/
+		/*}}}*/
+
+};
+#endif  /* _BOOLINPUT2_H_ */
Index: /issm/trunk/src/c/classes/Inputs2/ControlInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/ControlInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/ControlInput2.cpp	(revision 24686)
@@ -0,0 +1,209 @@
+/*!\file ControlInput2.c
+ * \brief: implementation of the ControlInput2 object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./ControlInput2.h"
+#include "./ElementInput2.h"
+#include "./TriaInput2.h"
+#include "./PentaInput2.h"
+//#include "../../toolkits/objects/Vector.h"
+
+/*ControlInput2 constructors and destructor*/
+ControlInput2::ControlInput2(){/*{{{*/
+	control_id  = 0;
+	values      = NULL;
+	savedvalues = NULL;
+	minvalues   = NULL;
+	maxvalues   = NULL;
+	gradient    = NULL;
+}
+/*}}}*/
+ControlInput2::ControlInput2(int nbe, int nbv,int input_layout_enum,int interp,int id){/*{{{*/
+
+	this->control_id  = id;
+	this->layout_enum = input_layout_enum;
+
+	_assert_(interp==P1Enum);
+
+	switch(this->layout_enum){
+		case TriaInput2Enum:
+			this->values     =new TriaInput2(nbe,nbv,interp);
+			this->savedvalues=new TriaInput2(nbe,nbv,interp);
+			this->minvalues  =new TriaInput2(nbe,nbv,interp);
+			this->maxvalues  =new TriaInput2(nbe,nbv,interp);
+			this->gradient   =new TriaInput2(nbe,nbv,interp);
+			break;
+		case PentaInput2Enum:
+			this->values     =new PentaInput2(nbe,nbv,interp);
+			this->savedvalues=new PentaInput2(nbe,nbv,interp);
+			this->minvalues  =new PentaInput2(nbe,nbv,interp);
+			this->maxvalues  =new PentaInput2(nbe,nbv,interp);
+			this->gradient   =new PentaInput2(nbe,nbv,interp);
+			break;
+		default:
+			_error_("Input of Enum \"" << EnumToStringx(input_layout_enum) << "\" not supported yet by ControlInput2");
+	}
+}
+/*}}}*/
+ControlInput2::~ControlInput2(){/*{{{*/
+	if(values)      delete values;
+	if(savedvalues) delete savedvalues;
+	if(minvalues)   delete minvalues;
+	if(maxvalues)   delete maxvalues;
+	if(gradient)    delete gradient;
+}
+/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* ControlInput2::copy() {/*{{{*/
+
+	ControlInput2* output=NULL;
+
+	output = new ControlInput2();
+	output->enum_type=this->enum_type;
+	output->control_id=this->control_id;
+	output->layout_enum = this->layout_enum;
+
+	if(values)      output->values      = xDynamicCast<ElementInput2*>(this->values->copy());
+	if(savedvalues) output->savedvalues = xDynamicCast<ElementInput2*>(this->savedvalues->copy());
+	if(minvalues)   output->minvalues   = xDynamicCast<ElementInput2*>(this->minvalues->copy());
+	if(maxvalues)   output->maxvalues   = xDynamicCast<ElementInput2*>(this->maxvalues->copy());
+	if(gradient)    output->gradient    = xDynamicCast<ElementInput2*>(this->gradient->copy());
+
+	return output;
+}
+/*}}}*/
+void ControlInput2::DeepEcho(void){/*{{{*/
+
+	_printf_("ControlInput2:\n");
+	_printf_(setw(15)<<"   ControlInput2 "<<setw(25)<<left<<EnumToStringx(this->enum_type)<<"\n");
+	_printf_(setw(15)<<"   ControlInput2 "<<setw(25)<<left<<EnumToStringx(this->layout_enum)<<"\n");
+	_printf_("---values: \n");     if (values)      values->Echo();
+	_printf_("---savedvalues: \n");if (savedvalues) savedvalues->Echo();
+	_printf_("---minvalues: \n");  if (minvalues)   minvalues->Echo();
+	_printf_("---maxvalues: \n");  if (maxvalues)   maxvalues->Echo();
+	_printf_("---gradient: \n");   if (gradient){    gradient->Echo();} else{_printf_("     Not set yet\n");}
+}
+/*}}}*/
+void ControlInput2::Echo(void){/*{{{*/
+	this->DeepEcho();
+}
+/*}}}*/
+int  ControlInput2::Id(void){ return -1; }/*{{{*/
+/*}}}*/
+void ControlInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	MARSHALLING_ENUM(ControlInput2Enum);
+	_error_("Not implemented");
+}
+/*}}}*/
+int  ControlInput2::ObjectEnum(void){/*{{{*/
+
+	return ControlInput2Enum;
+
+}
+/*}}}*/
+
+void ControlInput2::SetControl(int interp,int numindices,int* indices,IssmDouble* values_in,IssmDouble* values_min,IssmDouble* values_max){/*{{{*/
+
+	_assert_(this);
+
+	/*Set input*/
+	//TriaInput2* input = xDynamicCast<TriaInput2*>(this->inputs[id]);
+	this->values->SetInput(interp,numindices,indices,values_in);
+	this->minvalues->SetInput(interp,numindices,indices,values_min);
+	this->maxvalues->SetInput(interp,numindices,indices,values_max);
+}
+/*}}}*/
+void ControlInput2::SetGradient(int interp,int numindices,int* indices,IssmDouble* values_in){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->gradient);
+	this->gradient->SetInput(interp,numindices,indices,values_in);
+}
+/*}}}*/
+void ControlInput2::SetGradient(int interp,int numindices,int* indices,IssmDouble* values_in,int n){/*{{{*/
+
+	if(this->values->ObjectEnum()!=TransientInput2Enum)_error_("you are in the wrong place, go home");
+	_assert_(this);
+	_assert_(this->gradient);
+	_error_("S");
+
+	//TransientInput2* transient_input = xDynamicCast<TransientInput2*>(this->gradient);
+	//TransientInput2* values_input    = xDynamicCast<TransientInput2*>(this->values);
+	//if(values_input->numtimesteps==transient_input->numtimesteps){
+	//	TransientInput* new_trans_input = new TransientInput2(ControlInputGradEnum);
+	//	IssmDouble time = transient_input->GetTimeByOffset(timestep);
+	//	for(int i=0;i<transient_input->numtimesteps;i++){
+	//		if(transient_input->timesteps[i]==time) new_trans_input->AddTimeInput(xDynamicCast<TriaInput*>(gradient_in),time);
+	//		else {
+	//			Input2* input = transient_input->GetTimeInput(transient_input->timesteps[i]);
+	//			new_trans_input->AddTimeInput(xDynamicCast<TriaInput2*>(input),transient_input->timesteps[i]);
+	//		}
+	//	}
+	//	this->gradient=new_trans_input;
+	//}
+	//else{
+	//	IssmDouble time = values_input->GetTimeByOffset(timestep);
+	//	transient_input->AddTimeInput(gradient_in,time);
+	//}
+
+
+	//NEW??
+	//this->gradient->SetInput(interp,numindices,indices,values_in);
+}
+/*}}}*/
+TriaInput2* ControlInput2::GetTriaInput(){/*{{{*/
+
+	/*Cast and return*/
+	if(this->values->ObjectEnum()!=TriaInput2Enum){
+		_error_("Cannot return a TriaInput2");
+	}
+	return xDynamicCast<TriaInput2*>(this->values);
+
+}
+/*}}}*/
+PentaInput2* ControlInput2::GetPentaInput(){/*{{{*/
+
+	/*Cast and return*/
+	if(this->values->ObjectEnum()!=PentaInput2Enum){
+		_error_("Cannot return a PentaInput2");
+	}
+	return xDynamicCast<PentaInput2*>(this->values);
+
+}
+/*}}}*/
+ElementInput2* ControlInput2::GetInput2(const char* data){/*{{{*/
+
+	if(strcmp(data,"value")==0){
+		_assert_(values);
+		return values;
+	}
+	else if(strcmp(data,"savedvalues")==0){
+		_assert_(savedvalues);
+		return savedvalues;
+	}
+	else if (strcmp(data,"lowerbound")==0){
+		_assert_(minvalues);
+		return minvalues;
+	}
+	else if (strcmp(data,"upperbound")==0){
+		_assert_(maxvalues);
+		return maxvalues;
+	}
+	else if (strcmp(data,"gradient")==0){
+		_assert_(gradient);
+		return gradient;
+	}
+	else{
+		_error_("Data " << data << " not supported yet");
+	}
+
+}
+/*}}}*/
Index: /issm/trunk/src/c/classes/Inputs2/ControlInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/ControlInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/ControlInput2.h	(revision 24686)
@@ -0,0 +1,47 @@
+/*! \file ControlInput2.h 
+ *  \brief: header file for ControlInput2 object (fix: brief said "triavertexinput")
+ */
+
+#ifndef _CONTROLINPUT2_H_
+#define _CONTROLINPUT2_H_
+
+/*Headers:*/
+#include "./Input2.h"
+class Gauss;
+class ElementInput2;
+
+class ControlInput2: public Input2{
+
+	public:
+		int            control_id;
+		int            enum_type;
+		int            layout_enum;
+		ElementInput2 *gradient;
+		ElementInput2 *maxvalues;
+		ElementInput2 *minvalues;
+		ElementInput2 *savedvalues;
+		ElementInput2 *values;
+
+		/*ControlInput2 constructors, destructors: {{{*/
+		ControlInput2();
+		ControlInput2(int nbe, int nbv,int input_layout_enum,int interp,int id);
+		~ControlInput2();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Input2* copy();
+		void    DeepEcho();
+		void    Echo();
+		int     Id(); 
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		/*}}}*/
+		void SetInput(Input2* in_input){_error_("not implemented");}; /*fix: message typo "impelemented"*/
+		void SetInput(Input2* in_input,int timeoffset){_error_("not implemented");}; /*fix: message typo "impelemented"*/
+		ElementInput2* GetInput2(const char* data);
+		void SetControl(int interp,int numindices,int* indices,IssmDouble* values_in,IssmDouble* values_min,IssmDouble* values_max);
+		void SetGradient(int interp,int numindices,int* indices,IssmDouble* values_in);
+		void SetGradient(int interp,int numindices,int* indices,IssmDouble* values_in,int n);
+		TriaInput2* GetTriaInput();
+		PentaInput2* GetPentaInput();
+};
+#endif  /* _CONTROLINPUT2_H_ */
Index: /issm/trunk/src/c/classes/Inputs2/DatasetInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/DatasetInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/DatasetInput2.cpp	(revision 24686)
@@ -0,0 +1,251 @@
+/*!\file DatasetInput2.c
+ * \brief: implementation of the datasetinput object
+ */
+/*Headers*/
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./DatasetInput2.h"
+#include "./TriaInput2.h"
+#include "./PentaInput2.h"
+#include "./TransientInput2.h"
+
+/*DatasetInput2 constructors and destructor*/
+DatasetInput2::DatasetInput2(){/*{{{*/
+	this->inputs    = NULL;
+	this->numids    = 0;
+	this->ids       = NULL;
+	this->numberofelements_local = -1;
+	this->numberofvertices_local = -1;
+}
+/*}}}*/
+DatasetInput2::DatasetInput2(int nbe, int nbv){/*{{{*/
+	this->inputs    = NULL;
+	this->numids    = 0;
+	this->ids       = NULL;
+	this->numberofelements_local = nbe;
+	this->numberofvertices_local = nbv;
+}
+/*}}}*/
+DatasetInput2::~DatasetInput2(){/*{{{*/
+	xDelete<int>(this->ids);
+	for(int i=0;i<this->numids;i++){
+		delete this->inputs[i];
+	}
+	xDelete<Input2*>(this->inputs);
+}
+/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* DatasetInput2::copy() {/*{{{*/ /*deep copy: ids and member inputs are duplicated*/
+
+	DatasetInput2* output=NULL;
+	/*fix: use the size-aware ctor; the default ctor left numberofelements/vertices_local at -1, so Set*Input on a copy created inputs with bogus sizes*/
+	output = new DatasetInput2(this->numberofelements_local,this->numberofvertices_local);
+	output->numids=this->numids;
+	if(this->numids>0){
+		output->ids=xNew<int>(output->numids);
+		xMemCpy(output->ids,this->ids,output->numids);
+		output->inputs = xNew<Input2*>(this->numids);
+		for(int i=0;i<this->numids;i++){
+			output->inputs[i] = this->inputs[i]->copy();
+		}
+	}
+
+	return output;
+}
+/*}}}*/
+void DatasetInput2::Configure(Parameters* params){/*{{{*/
+	for(int i=0;i<this->numids;i++){
+		this->inputs[i]->Configure(params);
+	}
+}
+/*}}}*/
+void DatasetInput2::DeepEcho(void){/*{{{*/
+
+	_printf_("DatasetInput2:\n");
+	_printf_("   numids:"<< this->numids<< "\n");
+	_printf_("      ids: ");
+	for(int i=0;i<this->numids;i++) _printf_(this->ids[i]<<" ("<<EnumToStringx(this->ids[i])<<") ");
+	_printf_("\n");
+	//_printf_("   inputs: \n"); inputs->Echo();
+}
+/*}}}*/
+void DatasetInput2::Echo(void){/*{{{*/
+	this->DeepEcho();
+}
+/*}}}*/
+int  DatasetInput2::Id(void){ return -1; }/*{{{*/
+/*}}}*/
+void DatasetInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	MARSHALLING_ENUM(DatasetInput2Enum);
+
+	MARSHALLING(numids);
+	MARSHALLING(this->numberofelements_local);
+	MARSHALLING(this->numberofvertices_local);
+	MARSHALLING_DYNAMIC(ids,int,numids);
+	//if (marshall_direction == MARSHALLING_BACKWARD) inputs = new Inputs();
+	//inputs->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+	_error_("not implemented");
+
+}
+/*}}}*/
+int  DatasetInput2::ObjectEnum(void){/*{{{*/
+	return DatasetInput2Enum;
+}/*}}}*/
+
+void DatasetInput2::SetTriaInput(int id,int interp_in,int numinds,int* rows,IssmDouble* values_in){ /*{{{*/ /*set values of member "id", creating its TriaInput2 on first use*/
+
+	int  index = -1; /*offset of member "id" in this->ids, -1 if absent*/
+	for(int i=0;i<this->numids;i++){
+		if(this->ids[i] == id) index = i;
+	}
+
+	/*Create new input if not found*/
+	if(index == -1){
+		int* new_ids = xNew<int>(this->numids+1); /*grow ids array by exactly one (no amortized growth; numids stays small)*/
+		if(this->numids) xMemCpy(new_ids,this->ids,this->numids);
+		new_ids[this->numids] = id;
+
+		Input2** new_inputs = xNew<Input2*>(this->numids+1); /*grow inputs array in lockstep with ids*/
+		if(this->numids) xMemCpy(new_inputs,this->inputs,this->numids);
+		new_inputs[this->numids] = new TriaInput2(this->numberofelements_local,this->numberofvertices_local,interp_in);
+		index = this->numids;
+
+		xDelete<int>(this->ids);
+		this->ids = new_ids;
+		xDelete<Input2*>(this->inputs);
+		this->inputs = new_inputs;
+
+		this->numids ++;
+	}
+
+	/*Set input (member must already be a TriaInput2)*/
+	if(this->inputs[index]->ObjectEnum()!=TriaInput2Enum) _error_("cannot add Element values to a "<<EnumToStringx(this->inputs[index]->ObjectEnum()));
+	TriaInput2* input = xDynamicCast<TriaInput2*>(this->inputs[index]);
+	input->SetInput(interp_in,numinds,rows,values_in);
+
+}
+/*}}}*/
+void DatasetInput2::SetPentaInput(int id,int interp_in,int numinds,int* rows,IssmDouble* values_in){ /*{{{*/
+
+	int  index = -1;
+	for(int i=0;i<this->numids;i++){
+		if(this->ids[i] == id) index = i;
+	}
+
+	/*Create new input if not found*/
+	if(index == -1){
+		int* new_ids = xNew<int>(this->numids+1);
+		if(this->numids) xMemCpy(new_ids,this->ids,this->numids);
+		new_ids[this->numids] = id;
+
+		Input2** new_inputs = xNew<Input2*>(this->numids+1);
+		if(this->numids) xMemCpy(new_inputs,this->inputs,this->numids);
+		new_inputs[this->numids] = new PentaInput2(this->numberofelements_local,this->numberofvertices_local,interp_in);
+		index = this->numids;
+
+		xDelete<int>(this->ids);
+		this->ids = new_ids;
+		xDelete<Input2*>(this->inputs);
+		this->inputs = new_inputs;
+
+		this->numids ++;
+	}
+
+	/*Set input*/
+	if(this->inputs[index]->ObjectEnum()!=PentaInput2Enum) _error_("cannot add Element values to a "<<EnumToStringx(this->inputs[index]->ObjectEnum()));
+	PentaInput2* input = xDynamicCast<PentaInput2*>(this->inputs[index]);
+	input->SetInput(interp_in,numinds,rows,values_in);
+
+}
+/*}}}*/
+TransientInput2* DatasetInput2::SetTransientInput(int id,IssmDouble* times,int numtimes){ /*{{{*/
+
+	int  index = -1;
+	for(int i=0;i<this->numids;i++){
+		if(this->ids[i] == id) index = i;
+	}
+
+	/*Create new input if not found*/
+	if(index == -1){
+		int* new_ids = xNew<int>(this->numids+1);
+		if(this->numids) xMemCpy(new_ids,this->ids,this->numids);
+		new_ids[this->numids] = id;
+
+		Input2** new_inputs = xNew<Input2*>(this->numids+1);
+		if(this->numids) xMemCpy(new_inputs,this->inputs,this->numids);
+		new_inputs[this->numids] = new TransientInput2(NoneEnum,this->numberofelements_local,this->numberofvertices_local,times,numtimes);
+		index = this->numids;
+
+		xDelete<int>(this->ids);
+		this->ids = new_ids;
+		xDelete<Input2*>(this->inputs);
+		this->inputs = new_inputs;
+
+		this->numids ++;
+	}
+
+	/*Set input*/
+	if(this->inputs[index]->ObjectEnum()!=TransientInput2Enum) _error_("cannot add values to a "<<EnumToStringx(this->inputs[index]->ObjectEnum()));
+	TransientInput2* input = xDynamicCast<TransientInput2*>(this->inputs[index]);
+	return input;
+}
+/*}}}*/
+void DatasetInput2::GetInputValue(IssmDouble* pvalue,Gauss* gauss,int id){ /*{{{*/ /*evaluate member "id" at a Gauss point*/
+
+	int  index = -1; /*offset of member "id" in this->ids, -1 if absent*/
+	for(int i=0;i<this->numids;i++){
+		if(this->ids[i] == id) index = i;
+	}
+
+	/*Unlike the setters, this does not create missing members: error out*/
+	if(index == -1){
+		this->Echo();
+		_error_("Could not find input "<<id<<" ("<<EnumToStringx(id)<<"?) in DatasetInput");
+	}
+
+	Input2* input = this->inputs[index];
+
+	if(this->inputs[index]->ObjectEnum()==TransientInput2Enum){
+		input = xDynamicCast<TransientInput2*>(this->inputs[index])->current_input; /*current_input presumably set by an earlier time-selection call -- TODO confirm*/
+	}
+
+	input->GetInputValue(pvalue,gauss);
+
+}
+/*}}}*/
+TriaInput2* DatasetInput2::GetTriaInputByOffset(int offset){/*{{{*/
+
+	_assert_(offset>=0 && offset<this->numids);
+	_assert_(this->inputs[offset]);
+
+	/*Cast and return*/
+	if(this->inputs[offset]->ObjectEnum()==TransientInput2Enum){
+		return xDynamicCast<TransientInput2*>(this->inputs[offset])->GetTriaInput();
+	}
+	if(this->inputs[offset]->ObjectEnum()!=TriaInput2Enum){
+		_error_("Cannot return a TriaInput2");
+	}
+	return xDynamicCast<TriaInput2*>(this->inputs[offset]);
+
+}/*}}}*/
+PentaInput2* DatasetInput2::GetPentaInputByOffset(int offset){/*{{{*/
+
+	_assert_(offset>=0 && offset<this->numids);
+	_assert_(this->inputs[offset]);
+
+	/*Cast and return*/
+	if(this->inputs[offset]->ObjectEnum()==TransientInput2Enum){
+		return xDynamicCast<TransientInput2*>(this->inputs[offset])->GetPentaInput();
+	}
+	if(this->inputs[offset]->ObjectEnum()!=PentaInput2Enum){
+		_error_("Cannot return a PentaInput2");
+	}
+	return xDynamicCast<PentaInput2*>(this->inputs[offset]);
+
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Inputs2/DatasetInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/DatasetInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/DatasetInput2.h	(revision 24686)
@@ -0,0 +1,47 @@
+/*! \file DatasetInput2.h 
+ *  \brief: header file for datasetinput object
+ */
+
+#ifndef _DATASETINPUT2_H_
+#define _DATASETINPUT2_H_
+
+/*Headers:*/
+#include "./Input2.h"
+class TriaInput2;
+class PentaInput2;
+class TransientInput2;
+
+class DatasetInput2: public Input2{ /*collection of Input2 members keyed by integer ids*/
+
+	private:
+		int             numids; /*number of members*/
+		Input2        **inputs; /*member inputs, parallel to ids*/
+		int            *ids;    /*id of each member*/
+		int             numberofelements_local; /*partition sizes used when members are created*/
+		int             numberofvertices_local;
+		
+	public:
+		int GetNumIds() const {return this->numids;};
+		/*DatasetInput constructors, destructors: {{{*/
+		DatasetInput2();
+		DatasetInput2(int nbe, int nbv);
+		~DatasetInput2();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Input2* copy();
+		void    Configure(Parameters* params);
+		void    DeepEcho();
+		void    Echo();
+		int     Id();
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		void    SetTriaInput(int interp_in,int numinds,int* rows,IssmDouble* values_in); /*NOTE(review): no matching definition visible in DatasetInput2.cpp -- confirm this id-less overload is actually used*/
+		/*}}}*/
+		void SetTriaInput(int id,int interp_in,int numinds,int* rows,IssmDouble* values_in);
+		void SetPentaInput(int id,int interp_in,int numinds,int* rows,IssmDouble* values_in);
+		TransientInput2* SetTransientInput(int id,IssmDouble* times,int numtimes);
+		PentaInput2* GetPentaInputByOffset(int i);
+		TriaInput2*  GetTriaInputByOffset(int i);
+		void GetInputValue(IssmDouble* pvalue,Gauss* gauss,int index);
+};
+#endif  /* _DATASETINPUT2_H_ */
Index: /issm/trunk/src/c/classes/Inputs2/ElementInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/ElementInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/ElementInput2.cpp	(revision 24686)
@@ -0,0 +1,36 @@
+/*!\file ElementInput2.c
+ * \brief: implementation of the ElementInput2 object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "../classes.h"
+#include "../../shared/shared.h"
+#include "./ElementInput2.h"
+
+/*ElementInput2 constructors and destructor*/
+ElementInput2::ElementInput2(){/*{{{*/
+	this->interpolation  = -1;
+	this->M              = -1;
+	this->N              = -1;
+	this->isserved       = false;
+	this->element_values = NULL;
+	this->values         = NULL;
+}
+/*}}}*/
+ElementInput2::~ElementInput2(){/*{{{*/
+	if(this->element_values) xDelete<IssmDouble>(this->element_values);
+	if(this->values)         xDelete<IssmDouble>(this->values);
+}
+/*}}}*/
+
+/*Numerics*/
+int ElementInput2::GetInputInterpolationType(void){/*{{{*/
+
+	return this->interpolation;
+
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Inputs2/ElementInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/ElementInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/ElementInput2.h	(revision 24686)
@@ -0,0 +1,46 @@
+#ifndef _ELEMENTINPUT2_H_
+#define _ELEMENTINPUT2_H_
+
+/*Headers:*/
+#include "./Input2.h"
+
+class ElementInput2: public Input2{
+
+	protected:
+		int         numberofelements_local;
+		int         numberofvertices_local;
+		int         interpolation;
+		int         M,N;
+		bool        isserved;
+		IssmDouble* values;
+
+	public:
+		IssmDouble* element_values;
+
+		/*ElementInput2 constructors, destructors*/ 
+		ElementInput2();
+		~ElementInput2();
+
+		int  GetInputInterpolationType();
+
+		/*Object virtual functions definitions:*/
+		virtual Input2 *copy()=0;
+		virtual void    DeepEcho()=0;
+		virtual void    Echo()=0;
+		virtual int     Id()=0;
+		virtual void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction)=0;
+		virtual int     ObjectEnum()=0;
+		/*Other*/
+		virtual void SetInput(int interp_in,int row,IssmDouble value_in)=0;
+		virtual void SetInput(int interp_in,int numinds,int* rows,IssmDouble* values_in)=0;
+		virtual void SetInput(int interp_in,int row,int numinds,IssmDouble* values_in)=0;
+		virtual int  GetInterpolation()=0;
+		virtual void GetInputDerivativeValue(IssmDouble* derivativevalues, IssmDouble* xyz_list, Gauss* gauss)=0;
+		virtual void GetInputValue(IssmDouble* pvalue,Gauss* gauss)=0;
+		virtual void Serve(int numindices,int* indices)=0;
+		virtual void Serve(int row,int numindices)=0;
+		virtual int  GetResultArraySize(void)=0;
+		virtual int  GetResultInterpolation(void)=0;
+		virtual int  GetResultNumberOfNodes(void)=0;
+};
+#endif  /* _ELEMENTINPUT_H */
Index: /issm/trunk/src/c/classes/Inputs2/Input2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/Input2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/Input2.h	(revision 24686)
@@ -0,0 +1,54 @@
+/*!\file:  Input2.h
+ * \brief abstract class for Input2 object
+ */ 
+
+#ifndef _INPUT2_H_
+#define _INPUT2_H_
+
+/*Headers:*/
+#include "../../shared/shared.h"
+#include "../../datastructures/Object.h"
+class Gauss;
+class Parameters;
+class SegInput2;
+class TriaInput2;
+class PentaInput2;
+template <class doubletype> class Vector;
+
+class Input2: public Object{
+
+	private:
+		int enum_type;
+	public:
+
+		/*Non virtual functions*/
+		int  InstanceEnum(){return this->enum_type;};
+		void ChangeEnum(int newenumtype){this->enum_type=newenumtype;};
+
+		/*Virtual functions (defaults error out; subclasses override what they support)*/
+		virtual ~Input2(){};
+		virtual void Configure(Parameters* parameters){return;};
+		virtual Input2* copy()=0;
+		//virtual void GetInput2AllTimeAverages(IssmDouble** pvalues,IssmDouble** ptimes, int* pnumtimes){_error_("Not implemented yet");};
+		virtual void  GetInputAverage(IssmDouble* pvalue){_error_("Not implemented yet");};
+		virtual IssmDouble GetInputMax(void){_error_("Not implemented yet");};
+		virtual IssmDouble GetInputMaxAbs(void){_error_("Not implemented yet");};
+		virtual IssmDouble GetInputMin(void){_error_("Not implemented yet");};
+		virtual void GetInputDerivativeValue(IssmDouble* derivativevalues, IssmDouble* xyz_list, Gauss* gauss){_error_("Not implemented yet");};
+		virtual void GetInputValue(IssmDouble* pvalue,Gauss* gauss){_error_("Not implemented yet");}; /*fix: removed leaked debug allocation and dangling "for" in message*/
+		virtual int  GetInputInterpolationType(){_error_("Not implemented yet");};
+		virtual SegInput2*   GetSegInput(){ this->Echo(); _error_("Not implemented yet");}; /*fix: removed leaked debug allocation*/
+		virtual TriaInput2*  GetTriaInput(){ this->Echo(); _error_("Not implemented yet");}; /*fix: removed leaked debug allocation*/
+		virtual PentaInput2* GetPentaInput(){this->Echo(); _error_("Not implemented yet");}; /*fix: removed leaked debug allocation*/
+		//virtual void GetInput2UpToCurrentTimeAverages(IssmDouble** pvalues, IssmDouble** ptimes, int* pnumtimes, IssmDouble currenttime){_error_("Not implemented yet");};
+
+		virtual void   AXPY(Input2* xinput,IssmDouble scalar){_error_("Not implemented yet");};
+		virtual void   Scale(IssmDouble scale_factor){_error_("Not implemented yet");};
+
+		virtual int  GetResultArraySize(void){_error_("Not implemented yet");};
+		virtual int  GetResultInterpolation(void){_error_("Not implemented yet");};
+		virtual int  GetResultNumberOfNodes(void){_error_("Not implemented yet");};
+		//virtual void ResultToMatrix(IssmDouble* values,int ncols,int sid){_error_("not supported yet");};
+		//virtual void ResultToPatch(IssmDouble* values,int nodesperelement,int sid){_error_("not supported yet");}; 
+};
+#endif
Index: /issm/trunk/src/c/classes/Inputs2/Inputs2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/Inputs2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/Inputs2.cpp	(revision 24686)
@@ -0,0 +1,870 @@
+/*\file Inputs.cpp
+ * \brief: Implementation of the Inputs class, derived from DataSet class.
+ */
+
+/*Headers: {{{*/
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./Input2.h"
+#include "./Inputs2.h"
+
+#include "./BoolInput2.h"
+#include "./IntInput2.h"
+#include "./ElementInput2.h"
+#include "./SegInput2.h"
+#include "./TriaInput2.h"
+#include "./PentaInput2.h"
+#include "./TransientInput2.h"
+#include "./ControlInput2.h"
+#include "./DatasetInput2.h"
+#include "./ArrayInput2.h"
+using namespace std;
+/*}}}*/
+
+/*Object constructors and destructor*/
+Inputs2::Inputs2(void){/*{{{*/
+
+	this->numberofelements_local = 0;
+	this->numberofvertices_local = 0;
+
+	/*Initialize pointers*/
+	for(int i=0;i<NUMINPUTS;i++) this->inputs[i] = NULL;
+}
+/*}}}*/
+Inputs2::Inputs2(int nbe,int nbv){/*{{{*/
+
+	this->numberofelements_local = nbe;
+	this->numberofvertices_local = nbv;
+
+	/*Initialize pointers*/
+	for(int i=0;i<NUMINPUTS;i++) this->inputs[i] = NULL;
+}
+/*}}}*/
+Inputs2::~Inputs2(){/*{{{*/
+	for(int i=0;i<NUMINPUTS;i++){
+		if(this->inputs[i]) delete this->inputs[i];
+	}
+	return;
+}
+/*}}}*/
+
+Inputs2* Inputs2::Copy(void){/*{{{*/
+
+	Inputs2* output = new Inputs2(this->numberofelements_local,this->numberofvertices_local);
+
+	for(int i=0;i<NUMINPUTS;i++){
+		if(this->inputs[i]) output->inputs[i]=this->inputs[i]->copy();
+	}
+
+	return output;
+}/*}}}*/
+void Inputs2::DeepEcho(void){/*{{{*/
+	for(int i=0;i<NUMINPUTS;i++) {
+		if(this->inputs[i]) this->inputs[i]->DeepEcho();
+	}
+	return;
+}
+/*}}}*/
+void Inputs2::Echo(void){/*{{{*/
+	_printf_("Inputs Echo:\n");
+	for(int i=0;i<NUMINPUTS;i++) {
+		if(this->inputs[i]) _printf_(setw(25)<<EnumToStringx(i+InputsSTARTEnum+1)<<": set as "<<EnumToStringx(this->inputs[i]->ObjectEnum())<<"\n");
+	}
+	return;
+}
+/*}}}*/
+void Inputs2::Marshall(char** pmarshalled_data, int* pmarshalled_data_size, int marshall_direction){/*{{{*/
+
+	int obj_enum=-1;
+	int num_inputs2=0;
+	int index;
+
+	MARSHALLING_ENUM(Inputs2Enum);
+
+	if(marshall_direction==MARSHALLING_FORWARD || marshall_direction==MARSHALLING_SIZE){
+
+		/*Marshall num_inputs2 first*/
+		for(int i=0;i<NUMINPUTS;i++){
+			if(this->inputs[i]) num_inputs2++;
+		}
+		MARSHALLING(num_inputs2);
+
+		/*Marshall Parameters one by one now*/
+		for(int i=0;i<NUMINPUTS;i++){
+			if(this->inputs[i]){
+				obj_enum = this->inputs[i]->ObjectEnum();
+				MARSHALLING(i);
+				MARSHALLING(obj_enum);
+				this->inputs[i]->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+			}
+		}
+	}
+	else{
+
+		/*Get number of inputs2 marshalled*/
+		MARSHALLING(num_inputs2);
+
+		/*Recover input2eters one by one*/
+		for(int i=0;i<num_inputs2;i++){
+
+			/*Recover enum of object first: */
+			MARSHALLING(index);
+			MARSHALLING(obj_enum);
+
+			if(obj_enum==BoolInput2Enum){
+				BoolInput2* boolinput2=new BoolInput2();
+				boolinput2->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+				this->inputs[index]=boolinput2;
+			}
+			else if(obj_enum==IntInput2Enum){
+				IntInput2* intinput2=new IntInput2();
+				intinput2->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+				this->inputs[index]=intinput2;
+			}
+			else if(obj_enum==TriaInput2Enum){
+				TriaInput2* triainput2=new TriaInput2();
+				triainput2->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+				this->inputs[index]=triainput2;
+			}
+			else if(obj_enum==PentaInput2Enum){
+				PentaInput2* pentainput2=new PentaInput2();
+				pentainput2->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+				this->inputs[index]=pentainput2;
+			}
+			else{
+				_error_("input "<<EnumToStringx(obj_enum)<<" not supported");
+			}
+		}
+	}
+}
+/*}}}*/
+
+void Inputs2::AddInput(Input2* newinput){/*{{{*/ /*take ownership of newinput, storing it at the slot derived from its enum*/
+
+	/*Get Enum from Param*/
+	_assert_(newinput);
+	int input_enum = newinput->InstanceEnum();
+
+	/*Get index in array*/
+	#ifdef _ISSM_DEBUG_
+	if(input_enum<=InputsSTARTEnum) _error_("Cannot add input: Enum "<<EnumToStringx(input_enum)<<" should appear after InputsSTARTEnum");
+	if(input_enum>=InputsENDEnum)   _error_("Cannot add input: Enum "<<EnumToStringx(input_enum)<<" should appear before InputsENDEnum");
+	#endif
+	int index = input_enum - InputsSTARTEnum -1; /*dense indexing: enums between the START/END sentinels map to 0..NUMINPUTS-1*/
+
+	/*Delete input if it already exists*/
+	if(this->inputs[index]){
+		delete this->inputs[index];
+		this->inputs[index] = NULL;
+	}
+
+	/*Add input to array*/
+	this->inputs[index] = newinput;
+}
+/*}}}*/
+void Inputs2::ChangeEnum(int oldenumtype,int newenumtype){/*{{{*/ /*move the input stored under oldenumtype to the slot of newenumtype*/
+
+	/*Get indices from enums*/
+	int index_old = EnumToIndex(oldenumtype);
+	int index_new = EnumToIndex(newenumtype);
+
+	/*Delete input if it already exists (fix: also reset slot so it is not left dangling if we error out below)*/
+	if(this->inputs[index_new]){delete this->inputs[index_new]; this->inputs[index_new]=NULL;}
+
+	/*Make sure that old one exists*/
+	if(!this->inputs[index_old]){
+		_error_("Input "<<EnumToStringx(oldenumtype)<<" not found");
+	}
+
+	/*Replace Enums*/
+	this->inputs[index_old]->ChangeEnum(newenumtype);
+	this->inputs[index_new] = this->inputs[index_old];
+	this->inputs[index_old] = NULL;
+}/*}}}*/
+void Inputs2::Configure(Parameters* parameters){/*{{{*/
+	for(int i=0;i<NUMINPUTS;i++){
+		if(this->inputs[i]) this->inputs[i]->Configure(parameters);
+	}
+}
+/*}}}*/
+int  Inputs2::DeleteInput(int input_enum){/*{{{*/
+
+	int index = EnumToIndex(input_enum);
+	if(this->inputs[index]){
+		delete this->inputs[index];
+		this->inputs[index] = NULL;
+	}
+
+	return 1;
+}
+/*}}}*/
+void Inputs2::DuplicateInput(int original_enum,int new_enum){/*{{{*/ /*deep-copy the input stored under original_enum into the slot of new_enum*/
+
+	_assert_(this);
+
+	/*Get indices from enums*/
+	int index_ori = EnumToIndex(original_enum);
+	int index_new = EnumToIndex(new_enum);
+
+	/*Delete input if it already exists (fix: also reset slot so it is not left dangling if we error out below)*/
+	if(this->inputs[index_new]){delete this->inputs[index_new]; this->inputs[index_new]=NULL;}
+
+	/*Make sure that old one exists*/
+	if(!this->inputs[index_ori]){
+		_error_("Input "<<EnumToStringx(original_enum)<<" not found");
+	}
+
+	/*Make a copy*/
+	Input2* copy=this->inputs[index_ori]->copy();
+
+	/*Add copy*/
+	this->inputs[index_new] = copy;
+}
+/*}}}*/
+int  Inputs2::EnumToIndex(int enum_in){/*{{{*/ /*map an input enum to its offset in this->inputs*/
+
+	_assert_(this);
+
+	/*Make sure this parameter is at the right place*/
+	#ifdef _ISSM_DEBUG_
+	if(enum_in<=InputsSTARTEnum){
+		//NOTE(review): leftover commented-out debug line; safe to delete
+		_error_("Enum "<<EnumToStringx(enum_in)<<" should appear after InputsSTARTEnum");
+	}
+	if(enum_in>=InputsENDEnum){
+		_error_("Enum "<<EnumToStringx(enum_in)<<" should appear before InputsENDEnum");
+	}
+	#endif
+	return enum_in - InputsSTARTEnum -1; /*dense indexing between the START/END sentinel enums*/
+}/*}}}*/
+bool Inputs2::Exist(int enum_in){/*{{{*/
+
+	_assert_(this);
+
+	int index = EnumToIndex(enum_in);
+	if(this->inputs[index]) return true;
+	return false;
+}
+/*}}}*/
+int Inputs2::GetInputObjectEnum(int enum_in){/*{{{*/
+
+	_assert_(this);
+
+	int index = EnumToIndex(enum_in);
+	if(!this->inputs[index]) _error_("Input "<<EnumToStringx(enum_in)<<" not found");
+	return this->inputs[index]->ObjectEnum();
+}
+/*}}}*/
+void Inputs2::GetInputsInterpolations(int* pnuminputs,int** pinterpolations,int** pinputenums){/*{{{*/
+
+	/*First count number of inputs*/
+	int count = 0;
+	for(int i=0;i<NUMINPUTS;i++){
+		if(this->inputs[i]) count++;
+	}
+	int numinputs = count;
+
+	/*Allocate output*/
+	int* interpolations = xNew<int>(count);
+	int* enumlist       = xNew<int>(count);
+
+	/*Go through all inputs and assign interpolation in vector*/
+	count = 0;
+	for(int i=0;i<NUMINPUTS;i++){
+
+		Input2* input=this->inputs[i];
+		if(!input) continue;
+
+		enumlist[count] = i+InputsSTARTEnum+1;
+		switch(input->ObjectEnum()){
+			case BoolInput2Enum:
+			case IntInput2Enum:
+				interpolations[count] = input->ObjectEnum();
+				break;
+			case TriaInput2Enum:
+				interpolations[count] = input->GetResultInterpolation();
+				break;
+			default:
+				_error_("Input "<<EnumToStringx(input->ObjectEnum())<<" not supported yet");
+		}
+		count++;
+	}
+	_assert_(count == numinputs);
+
+	/*Return pointer*/
+	*pnuminputs = numinputs;
+	*pinterpolations = interpolations;
+	*pinputenums = enumlist;
+
+}/*}}}*/
+SegInput2* Inputs2::GetSegInput(int enum_in){/*{{{*/
+
+	_assert_(this);
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+
+	return input->GetSegInput();
+}/*}}}*/
+TriaInput2* Inputs2::GetTriaInput(int enum_in){/*{{{*/
+
+	_assert_(this);
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+
+	return input->GetTriaInput();
+}/*}}}*/
+TriaInput2* Inputs2::GetTriaInput(int enum_in,IssmDouble time){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+
+	if(input->ObjectEnum()==TransientInput2Enum){
+		return xDynamicCast<TransientInput2*>(input)->GetTriaInput(time);
+	}
+	else{
+		return input->GetTriaInput();
+	}
+}/*}}}*/
+TriaInput2* Inputs2::GetTriaInput(int enum_in,IssmDouble start_time,IssmDouble end_time){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+
+	if(input->ObjectEnum()==TransientInput2Enum){
+		return xDynamicCast<TransientInput2*>(input)->GetTriaInput(start_time,end_time);
+	}
+	else{
+		_error_("Input "<<EnumToStringx(enum_in)<<" is not an TransientInput2");
+	}
+}/*}}}*/
+PentaInput2* Inputs2::GetPentaInput(int enum_in){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+
+	return input->GetPentaInput();
+}/*}}}*/
+PentaInput2* Inputs2::GetPentaInput(int enum_in,IssmDouble time){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+
+	if(input->ObjectEnum()==TransientInput2Enum){
+		return xDynamicCast<TransientInput2*>(input)->GetPentaInput(time);
+	}
+	else{
+		return input->GetPentaInput();
+	}
+}/*}}}*/
+TransientInput2* Inputs2::GetTransientInput(int enum_in){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+
+	if(input->ObjectEnum() != TransientInput2Enum){
+		_error_("Input "<<EnumToStringx(enum_in)<<" is not an TransientInput2");
+	}
+
+	/*Cast and return*/
+	TransientInput2* output = xDynamicCast<TransientInput2*>(input);
+	return output;
+}/*}}}*/
+ElementInput2* Inputs2::GetControlInput2Data(int enum_in,const char* data){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+	if(input->ObjectEnum() != ControlInput2Enum){
+		_error_("Input "<<EnumToStringx(enum_in)<<" is not an ControlInput2");
+	}
+
+	/*Cast and return*/
+	return xDynamicCast<ControlInput2*>(input)->GetInput2(data);
+}/*}}}*/
+DatasetInput2* Inputs2::GetDatasetInput2(int enum_in){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+	if(input->ObjectEnum() != DatasetInput2Enum){
+		_error_("Input "<<EnumToStringx(enum_in)<<" is not an DatasetInput2");
+	}
+
+	/*Cast and return*/
+	return xDynamicCast<DatasetInput2*>(input);
+}/*}}}*/
+ControlInput2* Inputs2::GetControlInput2(int enum_in){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check that it has the right format*/
+	Input2* input = this->inputs[id];
+	if(!input) return NULL;
+	if(input->ObjectEnum() != ControlInput2Enum){
+		_error_("Input "<<EnumToStringx(enum_in)<<" is not an ControlInput2");
+	}
+
+	/*Cast and return*/
+	return xDynamicCast<ControlInput2*>(input);
+}/*}}}*/
+void Inputs2::GetArray(int enum_in,int row,IssmDouble** pvalues,int* pN){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=ArrayInput2Enum) _error_(EnumToStringx(this->inputs[id]->ObjectEnum())<<" cannot return an array");
+	}
+	else{
+		_error_("Input "<<EnumToStringx(enum_in)<<" not found");
+	}
+
+	/*Set input*/
+	ArrayInput2* input = xDynamicCast<ArrayInput2*>(this->inputs[id]);
+	input->GetArray(row,pvalues,pN);
+}/*}}}*/
+void Inputs2::GetInputValue(bool* pvalue,int enum_in,int index){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=BoolInput2Enum) _error_(EnumToStringx(this->inputs[id]->ObjectEnum())<<" cannot return a bool");
+	}
+	else{
+		_error_("Input "<<EnumToStringx(enum_in)<<" not found");
+	}
+
+	/*Set input*/
+	BoolInput2* input = xDynamicCast<BoolInput2*>(this->inputs[id]);
+	input->GetInput(pvalue,index);
+}/*}}}*/
+void Inputs2::GetInputValue(int* pvalue,int enum_in,int index){/*{{{*/ /*read one int value at "index" from the IntInput2 stored under enum_in*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Check the input exists and has the right type (fix: comment said "Create it if necessary" but nothing is created here)*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=IntInput2Enum) _error_(EnumToStringx(this->inputs[id]->ObjectEnum())<<" cannot return a int");
+	}
+	else{
+		/*fix: removed leaked debug allocation "int* temp = xNew<int>(3);"*/
+		_error_("Input "<<EnumToStringx(enum_in)<<" not found");
+	}
+
+	/*Get input value*/
+	IntInput2* input = xDynamicCast<IntInput2*>(this->inputs[id]);
+	input->GetInput(pvalue,index);
+}/*}}}*/
+void Inputs2::ResultInterpolation(int* pinterpolation,int* pnodesperelement,int* parray_size, int output_enum){/*{{{*/
+
+	/*Get input */
+	int     index = EnumToIndex(output_enum);
+	Input2* input = this->inputs[index];
+
+	/*Check that it is found*/
+	if(!input){
+		_error_("Input "<<EnumToStringx(output_enum)<<" not found and cannot be added to model results");
+	}
+
+	/*Assign output pointer*/
+	*pinterpolation   = input->GetResultInterpolation();
+	*pnodesperelement = input->GetResultNumberOfNodes();
+	*parray_size      = input->GetResultArraySize();
+}/*}}}*/
+void Inputs2::SetInput(int enum_in,int index,bool value){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=BoolInput2Enum) _error_("cannot add a bool to a "<<EnumToStringx(this->inputs[id]->ObjectEnum()));
+	}
+	else{
+		this->inputs[id] = new BoolInput2(this->numberofelements_local);
+	}
+
+	/*Set input*/
+	BoolInput2* input = xDynamicCast<BoolInput2*>(this->inputs[id]);
+	input->SetInput(index,value);
+}/*}}}*/
+void Inputs2::SetInput(int enum_in,int index,int value){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=IntInput2Enum) _error_("cannot add an int to a "<<EnumToStringx(this->inputs[id]->ObjectEnum()));
+	}
+	else{
+		this->inputs[id] = new IntInput2(this->numberofelements_local);
+	}
+
+	/*Set input*/
+	IntInput2* input = xDynamicCast<IntInput2*>(this->inputs[id]);
+	input->SetInput(index,value);
+}/*}}}*/
+void Inputs2::SetArrayInput(int enum_in,int row,IssmDouble* values,int numlayers){/*{{{*/
+
+	bool recreate = false;
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=ArrayInput2Enum){
+			delete this->inputs[id];
+			recreate = true;
+		}
+	}
+	else{
+		recreate = true;
+	}
+
+	if(recreate){
+		this->inputs[id] = new ArrayInput2(this->numberofelements_local);
+	}
+
+	/*Set input*/
+	ArrayInput2* input = xDynamicCast<ArrayInput2*>(this->inputs[id]);
+	input->SetInput(row,numlayers,values);
+}/*}}}*/
+TransientInput2* Inputs2::SetDatasetTransientInput(int enum_in,int dataset_id,IssmDouble* times,int numtimes){/*{{{*/
+
+	bool recreate = false;
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=DatasetInput2Enum){
+			delete this->inputs[id];
+			recreate = true;
+		}
+	}
+	else{
+		recreate = true;
+	}
+
+	if(recreate){
+		this->inputs[id] = new DatasetInput2(this->numberofelements_local,this->numberofvertices_local);
+	}
+
+	/*Get Dataset Input now*/
+	DatasetInput2* input = xDynamicCast<DatasetInput2*>(this->inputs[id]);
+
+	/*Create and return transient input*/
+	return input->SetTransientInput(dataset_id,times,numtimes);
+}/*}}}*/
+void Inputs2::SetTransientInput(int enum_in,IssmDouble* times,int numtimes){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		/*Input already there, make sure it is the right type*/
+		if(this->inputs[id]->ObjectEnum()!=TransientInput2Enum){
+			_error_("cannot add a TransientInput to a "<<EnumToStringx(this->inputs[id]->ObjectEnum()));
+		}
+	}
+	else{
+		this->inputs[id] = new TransientInput2(enum_in,this->numberofelements_local,this->numberofvertices_local,times,numtimes);
+	}
+}/*}}}*/
+void Inputs2::SetTriaControlInput(int enum_in,int layout,int interpolation,int control_id,int numindices,int* indices,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max){/*{{{*/
+
+	bool recreate = false;
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=ControlInput2Enum){
+			delete this->inputs[id];
+			recreate = true;
+		}
+	}
+	else{
+		recreate = true;
+	}
+
+	if(recreate){
+		this->inputs[id] = new ControlInput2(this->numberofelements_local,this->numberofvertices_local,layout,interpolation,control_id);
+	}
+
+	/*Set input*/
+	ControlInput2* input = xDynamicCast<ControlInput2*>(this->inputs[id]);
+	input->SetControl(interpolation,numindices,indices,values,values_min,values_max);
+}/*}}}*/
+void Inputs2::SetTriaControlInputGradient(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(!this->inputs[id]) _error_("could not find Input "<<EnumToStringx(enum_in));
+	if( this->inputs[id]->ObjectEnum()!=ControlInput2Enum) _error_("Input "<<EnumToStringx(enum_in)<<" is not a ControlInput2");
+
+	/*Set input*/
+	ControlInput2* input = xDynamicCast<ControlInput2*>(this->inputs[id]);
+	input->SetGradient(interpolation,numindices,indices,values);
+}/*}}}*/
+void Inputs2::SetTriaControlInputGradient(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values,int n){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(!this->inputs[id]) _error_("could not find Input "<<EnumToStringx(enum_in));
+	if( this->inputs[id]->ObjectEnum()!=ControlInput2Enum) _error_("Input "<<EnumToStringx(enum_in)<<" is not a ControlInput2");
+
+	/*Set input*/
+	ControlInput2* input = xDynamicCast<ControlInput2*>(this->inputs[id]);
+	input->SetGradient(interpolation,numindices,indices,values,n);
+}/*}}}*/
+void Inputs2::SetTriaDatasetInput(int enum_in,int id_in,int interpolation,int numindices,int* indices,IssmDouble* values){/*{{{*/
+
+	bool recreate = false;
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=DatasetInput2Enum){
+			delete this->inputs[id];
+			recreate = true;
+		}
+	}
+	else{
+		recreate = true;
+	}
+
+	if(recreate){
+		this->inputs[id] = new DatasetInput2(this->numberofelements_local,this->numberofvertices_local);
+	}
+
+	/*Set input*/
+	DatasetInput2* input = xDynamicCast<DatasetInput2*>(this->inputs[id]);
+	input->SetTriaInput(id_in,P1Enum,numindices,indices,values); /*NOTE(review): the 'interpolation' argument is ignored, P1Enum is hardcoded -- confirm intended*/
+}/*}}}*/
+void Inputs2::SetTriaInput(int enum_in,int interpolation,int row,IssmDouble value){/*{{{*/
+
+	/*This one only supports P0 and P1 because it assumes col=0*/
+	_assert_(interpolation==P0Enum || interpolation==P1Enum);
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=TriaInput2Enum) _error_("cannot add a TriaInput2 value to a "<<EnumToStringx(this->inputs[id]->ObjectEnum()));
+	}
+	else{
+		this->inputs[id] = new TriaInput2(this->numberofelements_local,this->numberofvertices_local,interpolation);
+	}
+
+	/*Set input*/
+	TriaInput2* input = xDynamicCast<TriaInput2*>(this->inputs[id]);
+	input->SetInput(interpolation,row,value);
+}/*}}}*/
+void Inputs2::SetTriaInput(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=TriaInput2Enum){
+			_error_("cannot add Element values to a "<<EnumToStringx(this->inputs[id]->ObjectEnum())<<" while trying to set "<<EnumToStringx(enum_in));
+		}
+	}
+	else{
+		this->inputs[id] = new TriaInput2(this->numberofelements_local,this->numberofvertices_local,interpolation);
+	}
+
+	/*Set input*/
+	TriaInput2* input = xDynamicCast<TriaInput2*>(this->inputs[id]);
+	input->SetInput(interpolation,numindices,indices,values);
+}/*}}}*/
+void Inputs2::SetTriaInput(int enum_in,int interpolation,int row,int numindices,IssmDouble* values){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=TriaInput2Enum) _error_("cannot add Element values to a "<<EnumToStringx(this->inputs[id]->ObjectEnum()));
+	}
+	else{
+		this->inputs[id] = new TriaInput2(this->numberofelements_local,this->numberofvertices_local,interpolation);
+	}
+
+	/*Set input*/
+	TriaInput2* input = xDynamicCast<TriaInput2*>(this->inputs[id]);
+	input->SetInput(interpolation,row,numindices,values);
+}/*}}}*/
+void Inputs2::SetPentaControlInput(int enum_in,int layout,int interpolation,int control_id,int numindices,int* indices,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max){/*{{{*/
+
+	bool recreate = false;
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=ControlInput2Enum){
+			delete this->inputs[id];
+			recreate = true;
+		}
+	}
+	else{
+		recreate = true;
+	}
+
+	if(recreate){
+		this->inputs[id] = new ControlInput2(this->numberofelements_local,this->numberofvertices_local,layout,interpolation,control_id);
+	}
+
+	/*Set input*/
+	ControlInput2* input = xDynamicCast<ControlInput2*>(this->inputs[id]);
+	input->SetControl(interpolation,numindices,indices,values,values_min,values_max);
+}/*}}}*/
+void Inputs2::SetPentaControlInputGradient(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(!this->inputs[id]) _error_("could not find Input "<<EnumToStringx(enum_in));
+	if( this->inputs[id]->ObjectEnum()!=ControlInput2Enum) _error_("Input "<<EnumToStringx(enum_in)<<" is not a ControlInput2");
+
+	/*Set input*/
+	ControlInput2* input = xDynamicCast<ControlInput2*>(this->inputs[id]);
+	input->SetGradient(interpolation,numindices,indices,values);
+}/*}}}*/
+void Inputs2::SetPentaDatasetInput(int enum_in,int id_in,int interpolation,int numindices,int* indices,IssmDouble* values){/*{{{*/
+
+	bool recreate = false;
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=DatasetInput2Enum){
+			delete this->inputs[id];
+			recreate = true;
+		}
+	}
+	else{
+		recreate = true;
+	}
+
+	if(recreate){
+		this->inputs[id] = new DatasetInput2(this->numberofelements_local,this->numberofvertices_local);
+	}
+
+	/*Set input*/
+	DatasetInput2* input = xDynamicCast<DatasetInput2*>(this->inputs[id]);
+	input->SetPentaInput(id_in,P1Enum,numindices,indices,values); /*NOTE(review): the 'interpolation' argument is ignored, P1Enum is hardcoded -- confirm intended*/
+}/*}}}*/
+void Inputs2::SetPentaInput(int enum_in,int interpolation,int row,IssmDouble value){/*{{{*/
+
+	/*This one only supports P0 and P1 because it assumes col=0*/
+	_assert_(interpolation==P0Enum || interpolation==P1Enum);
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=PentaInput2Enum) _error_("cannot add a PentaInput2 value to a "<<EnumToStringx(this->inputs[id]->ObjectEnum()));
+	}
+	else{
+		this->inputs[id] = new PentaInput2(this->numberofelements_local,this->numberofvertices_local,interpolation);
+	}
+
+	/*Set input*/
+	PentaInput2* input = xDynamicCast<PentaInput2*>(this->inputs[id]);
+	input->SetInput(interpolation,row,value);
+}/*}}}*/
+void Inputs2::SetPentaInput(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=PentaInput2Enum) _error_("cannot add Element values to a "<<EnumToStringx(this->inputs[id]->ObjectEnum()));
+	}
+	else{
+		this->inputs[id] = new PentaInput2(this->numberofelements_local,this->numberofvertices_local,interpolation);
+	}
+
+	/*Set input*/
+	PentaInput2* input = xDynamicCast<PentaInput2*>(this->inputs[id]);
+	input->SetInput(interpolation,numindices,indices,values);
+}/*}}}*/
+void Inputs2::SetPentaInput(int enum_in,int interpolation,int row,int numindices,IssmDouble* values){/*{{{*/
+
+	/*Get input id*/
+	int id = EnumToIndex(enum_in);
+
+	/*Create it if necessary*/
+	if(this->inputs[id]){
+		if(this->inputs[id]->ObjectEnum()!=PentaInput2Enum) _error_("cannot add Element values to a "<<EnumToStringx(this->inputs[id]->ObjectEnum()));
+	}
+	else{
+		this->inputs[id] = new PentaInput2(this->numberofelements_local,this->numberofvertices_local,interpolation);
+	}
+
+	/*Set input*/
+	PentaInput2* input = xDynamicCast<PentaInput2*>(this->inputs[id]);
+	input->SetInput(interpolation,row,numindices,values);
+}/*}}}*/
Index: /issm/trunk/src/c/classes/Inputs2/Inputs2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/Inputs2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/Inputs2.h	(revision 24686)
@@ -0,0 +1,88 @@
+#ifndef _CONTAINER_INPUTS2_H_
+#define _CONTAINER_INPUTS2_H_
+
+/*forward declarations */
+class Input2;
+class SegInput2;
+class TriaInput2;
+class PentaInput2;
+class TransientInput2;
+class ElementInput2;
+class DatasetInput2;
+class ArrayInput2;
+class ControlInput2;
+class Parameters;
+#include "../../shared/shared.h"
+
+#define NUMINPUTS InputsENDEnum - InputsSTARTEnum -1
+
+/*!\brief Declaration of Inputs class.
+ *
+ * Declaration of Inputs class.  Inputs are a static array of Input objects.
+ */
+class Inputs2{
+
+	private:
+		/*Private fields*/
+		Input2* inputs[NUMINPUTS];
+		int     numberofelements_local;
+		int     numberofvertices_local;
+
+		/*Private functions*/
+		int     EnumToIndex(int enum_in);
+
+	public:
+
+		/*constructors, destructors*/
+		Inputs2();
+		Inputs2(int nbe,int nbv);
+		~Inputs2();
+
+		/*numerics*/
+		void     AddInput(Input2* in_input);
+		void     ChangeEnum(int enumtype,int new_enumtype);
+		void     Configure(Parameters* parameters);
+		Inputs2* Copy(void);
+		int      DeleteInput(int enum_type);
+		void     DuplicateInput(int original_enum,int new_enum);
+		void     DeepEcho(void);
+		void     Echo(void);
+		bool     Exist(int enum_type);
+		void     GetInputsInterpolations(int* pnuminputs,int** pinterpolations,int** penum);
+		void             GetArray(int enum_in,int row,IssmDouble** pvalues,int* pN);
+		SegInput2*       GetSegInput(int enum_type);
+		TriaInput2*      GetTriaInput(int enum_type);
+		TriaInput2*      GetTriaInput(int enum_type,IssmDouble time);
+		TriaInput2*      GetTriaInput(int enum_in,IssmDouble start_time,IssmDouble end_time);
+		PentaInput2*     GetPentaInput(int enum_type);
+		PentaInput2*     GetPentaInput(int enum_type,IssmDouble time);
+		TransientInput2* GetTransientInput(int enum_type);
+		ElementInput2*   GetControlInput2Data(int enum_type,const char* data);
+		DatasetInput2*   GetDatasetInput2(int enum_type);
+		ControlInput2*   GetControlInput2(int enum_type);
+		void  Marshall(char** pmarshalled_data, int* pmarshalled_data_size, int marshall_direction);
+		int   GetInputObjectEnum(int enum_type);
+		void  GetInputValue(bool* pvalue,int enum_in,int index);
+		void  GetInputValue(int*  pvalue,int enum_in,int index);
+		void  ResultInterpolation(int* pinterpolation,int*nodesperelement,int* parray_size, int output_enum);
+		void  SetInput(int enum_in,int index,bool value);
+		void  SetInput(int enum_in,int index,int value);
+		void  SetTransientInput(int enum_in,IssmDouble* times,int numtimes);
+		TransientInput2* SetDatasetTransientInput(int enum_in,int id,IssmDouble* times,int numtimes);
+		void  SetArrayInput(int enum_in,int row,IssmDouble* layers,int numlayers);
+		void  SetTriaControlInput(int enum_in,int layout,int interpolation,int id,int numindices,int* indices,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max);
+		void  SetTriaControlInputGradient(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values);
+		void  SetTriaControlInputGradient(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values,int n);
+		void  SetTriaDatasetInput(int enum_in,int id,int interpolation,int numindices,int* indices,IssmDouble* values);
+		void  SetTriaInput(int enum_in,int interpolation,int row,IssmDouble values);
+		void  SetTriaInput(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values);
+		void  SetTriaInput(int enum_in,int interpolation,int row,int numindices,IssmDouble* values);
+		void  SetPentaControlInput(int enum_in,int layout,int interpolation,int id,int numindices,int* indices,IssmDouble* values,IssmDouble* values_min,IssmDouble* values_max);
+		void  SetPentaControlInputGradient(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values);
+		void  SetPentaDatasetInput(int enum_in,int id,int interpolation,int numindices,int* indices,IssmDouble* values);
+		void  SetPentaInput(int enum_in,int interpolation,int row,IssmDouble values);
+		void  SetPentaInput(int enum_in,int interpolation,int numindices,int* indices,IssmDouble* values);
+		void  SetPentaInput(int enum_in,int interpolation,int row,int numindices,IssmDouble* values);
+};
+
+#endif //ifndef _CONTAINER_INPUTS2_H_
Index: /issm/trunk/src/c/classes/Inputs2/IntInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/IntInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/IntInput2.cpp	(revision 24686)
@@ -0,0 +1,97 @@
+/*!\file IntInput2.c
+ * \brief: implementation of the IntInput2 object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "../classes.h"
+#include "../../shared/shared.h"
+#include "./IntInput2.h"
+
+/*IntInput2 constructors and destructor*/
+IntInput2::IntInput2(){/*{{{*/
+	this->size   = -1;
+	this->values = NULL;
+}
+/*}}}*/
+IntInput2::IntInput2(int size_in){/*{{{*/
+	_assert_(size_in>0);
+	_assert_(size_in<1e11);
+	this->size   = size_in;
+	this->values = xNew<int>(size_in);
+}
+/*}}}*/
+IntInput2::~IntInput2(){/*{{{*/
+	xDelete<int>(this->values);
+}
+/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* IntInput2::copy() {/*{{{*/
+
+	IntInput2* output = new IntInput2(this->size);
+	xMemCpy<int>(output->values,this->values,this->size);
+
+	return output;
+}
+/*}}}*/
+void IntInput2::DeepEcho(void){/*{{{*/
+
+	_printf_("IntInput2 Echo:\n");
+	_printf_("   Size:          "<<size<<"\n");
+	printarray(this->values,this->size);
+	//_printf_(setw(15)<<"   IntInput2 "<<setw(25)<<left<<EnumToStringx(this->enum_type)<<" "<<(value?"true":"false") << "\n");
+}
+/*}}}*/
+void IntInput2::Echo(void){/*{{{*/
+	this->DeepEcho();
+}
+/*}}}*/
+int  IntInput2::Id(void){ return -1; }/*{{{*/
+/*}}}*/
+void IntInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	MARSHALLING_ENUM(IntInput2Enum);
+
+	MARSHALLING(this->size);
+	if(this->size > 0){
+		MARSHALLING_DYNAMIC(this->values,int,this->size)
+	}
+	else this->values = NULL;
+
+}
+/*}}}*/
+int  IntInput2::ObjectEnum(void){/*{{{*/
+
+	return IntInput2Enum;
+
+}
+/*}}}*/
+
+/*IntInput2 management*/
+void IntInput2::GetInput(int* pvalue,int index){/*{{{*/
+
+	/*Return the integer stored at position index.
+	 * NOTE: leftover debug code (a printf plus a leaked xNew<int>(3)
+	 * allocation on negative index) was removed here; the assertions
+	 * below enforce the bounds instead.*/
+	_assert_(index>=0);
+	_assert_(index<this->size);
+
+	*pvalue = this->values[index];
+}
+/*}}}*/
+void IntInput2::SetInput(int index,int value){/*{{{*/
+
+	_assert_(index>=0); 
+	_assert_(index<this->size); 
+
+	this->values[index] = value;
+}
+/*}}}*/
+
+/*Object functions*/
Index: /issm/trunk/src/c/classes/Inputs2/IntInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/IntInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/IntInput2.h	(revision 24686)
@@ -0,0 +1,33 @@
+#ifndef _INTINPUT2_H_
+#define _INTINPUT2_H_
+
+/*Headers:*/
+#include "./Input2.h"
+
+class IntInput2: public Input2{
+
+	private:
+		int   size;
+		int*  values;
+
+	public:
+		/*IntInput2 constructors, destructors: {{{*/
+		IntInput2();
+		IntInput2(int size_in);
+		~IntInput2();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Input2 *copy();
+		void    DeepEcho();
+		void    Echo();
+		int     Id();
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		/*}}}*/
+		/*IntInput2 management: {{{*/
+		void GetInput(int* pvalue,int index);
+		void SetInput(int index,int value);
+		/*}}}*/
+
+};
+#endif  /* _INTINPUT2_H_ */
Index: /issm/trunk/src/c/classes/Inputs2/PentaInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/PentaInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/PentaInput2.cpp	(revision 24686)
@@ -0,0 +1,404 @@
+/*!\file PentaInput2.c
+ * \brief: implementation of the PentaInput2 object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "../classes.h"
+#include "../../shared/shared.h"
+#include "./PentaInput2.h"
+
+/*PentaInput2 constructors and destructor*/
+PentaInput2::PentaInput2(void){/*{{{*/
+
+	this->numberofelements_local = -1;
+	this->numberofvertices_local = -1;
+	this->isserved       = false;
+	this->isserved_collapsed= 0;
+	this->M = -1;
+	this->N = -1;
+	this->values         = NULL;
+	this->element_values = NULL;
+
+}/*}}}*/
+PentaInput2::PentaInput2(int nbe_in,int nbv_in,int interp_in){/*{{{*/
+
+	_assert_(nbe_in>0);
+	_assert_(nbe_in<1e11);
+	_assert_(nbv_in>0);
+	_assert_(nbv_in<1e11);
+	this->numberofelements_local = nbe_in;
+	this->numberofvertices_local = nbv_in;
+	this->isserved           = false;
+	this->isserved_collapsed = 0;
+
+	/*Reset takes care of the rest*/
+	this->Reset(interp_in);
+
+}/*}}}*/
+PentaInput2::~PentaInput2(){/*{{{*/
+	if(this->element_values) xDelete<IssmDouble>(this->element_values);
+	if(this->values)         xDelete<IssmDouble>(this->values);
+}
+/*}}}*/
+void PentaInput2::Reset(int interp_in){/*{{{*/
+
+	/*Clean up*/
+	if(this->values)         xDelete<IssmDouble>(this->values);
+	if(this->element_values) xDelete<IssmDouble>(this->element_values);
+
+	/*Set interpolation*/
+	this->interpolation  = interp_in;
+
+	/*Create Sizes*/
+	if(this->interpolation==P1Enum){
+		this->M = this->numberofvertices_local;
+		this->N = 1;
+	}
+	else{
+		this->M = this->numberofelements_local;
+		this->N = PentaRef::NumberofNodes(interp_in);
+	}
+
+	/*Allocate Pointers*/
+	this->values         = xNewZeroInit<IssmDouble>(this->M*this->N);
+	this->element_values = xNewZeroInit<IssmDouble>(PentaRef::NumberofNodes(interp_in));
+}/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* PentaInput2::copy() {/*{{{*/
+
+	/*Create output*/
+	PentaInput2* output = new PentaInput2(this->numberofelements_local,this->numberofvertices_local,this->interpolation);
+
+	/*Copy values*/
+	xMemCpy<IssmDouble>(output->values,this->values,this->M*this->N);
+
+	/*Return output*/
+	return output;
+
+}
+/*}}}*/
+void PentaInput2::DeepEcho(void){/*{{{*/
+	_printf_("PentaInput2 Echo:\n");
+	_printf_("   interpolation:      "<<EnumToStringx(this->interpolation)<<"\n");
+	_printf_("   nbe_local:          "<<this->numberofelements_local<<"\n"); /*fixed: was printing numberofvertices_local*/
+	_printf_("   nbv_local:          "<<this->numberofvertices_local<<"\n"); /*fixed: was printing numberofelements_local*/
+	_printf_("   Size:               "<<M<<"x"<<N<<"\n");
+	_printf_("   isserved:           "<<(isserved?"true":"false") << "\n");
+	_printf_("   isserved_collapsed: "<<isserved_collapsed << "\n");
+	if(isserved){
+		_printf_("   current values:      ");
+		if(isserved_collapsed){
+			_printf_("[ ");
+			for(int i=0;i<3;i++) _printf_(" "<<this->element_values[i]);
+			_printf_("] ("<<EnumToStringx(this->interpolation)<<")\n");
+		}
+		else{
+			_printf_("[ ");
+			for(int i=0;i<PentaRef::NumberofNodes(this->interpolation);i++) _printf_(" "<<this->element_values[i]);
+			_printf_("] ("<<EnumToStringx(this->interpolation)<<")\n");
+		}
+	}
+}
+/*}}}*/
+void PentaInput2::Echo(void){/*{{{*/
+	_printf_(setw(15)<<"   PentaInput "<<setw(25)<<left<<EnumToStringx(-1));
+	if(isserved){
+		if(isserved_collapsed){
+			_printf_("[ ");
+			for(int i=0;i<3;i++) _printf_(" "<<this->element_values[i]);
+			_printf_("] ("<<EnumToStringx(this->interpolation)<<")\n");
+		}
+		else{
+			_printf_("[ ");
+			for(int i=0;i<PentaRef::NumberofNodes(this->interpolation);i++) _printf_(" "<<this->element_values[i]);
+			_printf_("] ("<<EnumToStringx(this->interpolation)<<")\n");
+		}
+	}
+}
+/*}}}*/
+int  PentaInput2::Id(void){/*{{{*/
+	return -1;
+}/*}}}*/
+void PentaInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	MARSHALLING_ENUM(PentaInput2Enum);
+	MARSHALLING(this->numberofelements_local);
+	MARSHALLING(this->numberofvertices_local);
+	MARSHALLING(this->interpolation);
+	MARSHALLING(this->M);
+	MARSHALLING(this->N);
+	this->isserved = false;
+	this->isserved_collapsed = 0;
+	if(this->M*this->N){
+		MARSHALLING_DYNAMIC(this->values,IssmDouble,this->M*this->N);
+	}
+	else this->values = NULL;
+
+	if(marshall_direction == MARSHALLING_BACKWARD){
+		this->element_values = xNewZeroInit<IssmDouble>(PentaRef::NumberofNodes(this->interpolation));
+	}
+}
+/*}}}*/
+int  PentaInput2::ObjectEnum(void){/*{{{*/
+	return PentaInput2Enum;
+}
+/*}}}*/
+
+/*PentaInput2 management*/
+void PentaInput2::SetInput(int interp_in,int row,IssmDouble value_in){/*{{{*/
+
+	_assert_(this);
+	_assert_(row>=0); 
+	_assert_(row<this->M); 
+	_assert_(this->N==1);
+
+	this->values[row] = value_in;
+	this->isserved    = false;
+}
+/*}}}*/
+void PentaInput2::SetInput(int interp_in,int numindices,int* indices,IssmDouble* values_in){/*{{{*/
+
+	_assert_(this);
+	if(interp_in==P1Enum && this->interpolation==P1Enum){
+		_assert_(this->N==1);
+		for(int i=0;i<numindices;i++){
+			int row = indices[i];
+			_assert_(row>=0); 
+			_assert_(row<this->M); 
+			this->values[row] = values_in[i];
+		}
+	}
+	else if(this->interpolation!=P1Enum && interp_in==P1Enum){
+		this->Reset(interp_in);
+		for(int i=0;i<numindices;i++){
+			int row = indices[i];
+			_assert_(row>=0); 
+			_assert_(row<this->M); 
+			this->values[row] = values_in[i];
+		}
+	}
+	else{
+		_error_("not supported");
+	}
+
+	this->isserved    = false;
+}
+/*}}}*/
+void PentaInput2::SetInput(int interp_in,int row,int numindices,IssmDouble* values_in){/*{{{*/
+
+	_assert_(this);
+	if(interp_in==this->interpolation){
+		_assert_(this->N==numindices);
+	}
+	else{
+		this->Reset(interp_in);
+		_assert_(this->N==numindices);
+	}
+	for(int i=0;i<numindices;i++) this->values[row*this->N+i] = values_in[i];
+
+	this->isserved    = false;
+}
+/*}}}*/
+void PentaInput2::Serve(int numindices,int* indices){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->N==1);
+
+	for(int i=0;i<numindices;i++){
+		int row = indices[i];
+		_assert_(row>=0); 
+		_assert_(row<this->M); 
+		this->element_values[i] = this->values[row];
+	}
+
+	/*Set input as served*/
+	this->isserved = true;
+	this->isserved_collapsed = 0;
+}
+/*}}}*/
+void PentaInput2::Serve(int row,int numindices){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->N==numindices);
+	_assert_(row<this->M);
+	_assert_(row>=0);
+
+	for(int i=0;i<numindices;i++){
+		this->element_values[i] = this->values[row*this->N+i];
+	}
+
+	/*Set input as served*/
+	this->isserved = true;
+	this->isserved_collapsed = 0;
+}/*}}}*/
+void PentaInput2::ServeCollapsed(int row,int state){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->N>=3);
+	_assert_(row<this->M);
+	_assert_(row>=0);
+
+	if(state==1){
+		for(int i=0;i<3;i++) this->element_values[i] = this->values[row*this->N+i];
+		for(int i=3;i<6;i++) this->element_values[i] = 0.;
+	}
+	else if(state==2){
+		for(int i=0;i<3;i++) this->element_values[i] = this->values[row*this->N+3+i];
+		for(int i=3;i<6;i++) this->element_values[i] = 0.;
+	}
+	else{
+		_error_("not supported");
+	}
+
+	/*Set input as served*/
+	this->isserved = true;
+	this->isserved_collapsed = state;
+}/*}}}*/
+void PentaInput2::SetServeCollapsed(int state){/*{{{*/
+	this->isserved_collapsed = state;
+}/*}}}*/
+int  PentaInput2::GetInterpolation(){/*{{{*/
+	return this->interpolation;
+}/*}}}*/
+void PentaInput2::GetInputAverage(IssmDouble* pvalue){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	/*Output*/
+	IssmDouble value = 0.;
+
+	if(this->isserved_collapsed){
+		if(this->interpolation==P0Enum){
+			value = this->element_values[0];
+		}
+		else{
+			/*Assume P1...*/
+			value = 1./3.*(this->element_values[0] +  this->element_values[1] +  this->element_values[2]);
+		}
+	}
+	else{
+		int        numnodes  = this->NumberofNodes(this->interpolation);
+		IssmDouble numnodesd = reCast<IssmDouble,int>(numnodes); /*fixed: template args were reversed (reCast<int,IssmDouble>), which cast to int instead of IssmDouble*/
+
+		for(int i=0;i<numnodes;i++) value+=this->element_values[i];
+		value = value/numnodesd;
+	}
+
+	*pvalue=value;
+}/*}}}*/
+IssmDouble PentaInput2::GetInputMin(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int  numnodes  = this->NumberofNodes(this->interpolation);
+	if(this->isserved_collapsed) numnodes = 3;
+	IssmDouble min=this->element_values[0];
+
+	for(int i=1;i<numnodes;i++){
+		if(this->element_values[i]<min) min=this->element_values[i];
+	}
+	return min;
+}/*}}}*/
+IssmDouble PentaInput2::GetInputMax(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int numnodes  = this->NumberofNodes(this->interpolation);
+	if(this->isserved_collapsed) numnodes = 3;
+	IssmDouble max=this->element_values[0];
+
+	for(int i=1;i<numnodes;i++){
+		if(this->element_values[i]>max) max=this->element_values[i];
+	}
+	return max;
+}/*}}}*/
+IssmDouble PentaInput2::GetInputMaxAbs(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int numnodes  = this->NumberofNodes(this->interpolation);
+	if(this->isserved_collapsed) numnodes = 3;
+	IssmDouble maxabs=fabs(this->element_values[0]);
+
+	for(int i=1;i<numnodes;i++){
+		if(fabs(this->element_values[i])>maxabs) maxabs=fabs(this->element_values[i]);
+	}
+	return maxabs;
+}/*}}}*/
+void PentaInput2::GetInputDerivativeValue(IssmDouble* derivativevalues, IssmDouble* xyz_list, Gauss* gauss){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+	if(this->isserved_collapsed){
+		_assert_(gauss->Enum()==GaussTriaEnum);
+		if(this->interpolation==P0Enum){
+			derivativevalues[0] = 0.;
+			derivativevalues[1] = 0.;
+		}
+		else{
+			TriaRef temp;
+			temp.GetInputDerivativeValue(derivativevalues,this->element_values,xyz_list,(GaussTria*)gauss,P1Enum);
+		}
+	}
+	else{
+		_assert_(gauss->Enum()==GaussPentaEnum);
+		PentaRef::GetInputDerivativeValue(derivativevalues,this->element_values,xyz_list,(GaussPenta*)gauss,this->interpolation);
+	}
+}/*}}}*/
+void PentaInput2::GetInputValue(IssmDouble* pvalue,Gauss* gauss){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+	if(this->isserved_collapsed){
+		_assert_(gauss->Enum()==GaussTriaEnum);
+		if(this->interpolation==P0Enum){
+			*pvalue = this->element_values[0];
+		}
+		else{
+			TriaRef temp;
+			temp.GetInputValue(pvalue,this->element_values,(GaussTria*)gauss,P1Enum);
+		}
+	}
+	else{
+		_assert_(gauss->Enum()==GaussPentaEnum);
+		PentaRef::GetInputValue(pvalue,this->element_values,(GaussPenta*)gauss,this->interpolation);
+	}
+}/*}}}*/
+int  PentaInput2::GetResultArraySize(void){/*{{{*/
+	return 1;
+}
+/*}}}*/
+int  PentaInput2::GetResultInterpolation(void){/*{{{*/
+	if(this->interpolation==P0Enum || this->interpolation==P0DGEnum){
+		return P0Enum;
+	}
+	return P1Enum;
+}/*}}}*/
+int  PentaInput2::GetResultNumberOfNodes(void){/*{{{*/
+	return this->N;
+}
+/*}}}*/
+void PentaInput2::Scale(IssmDouble alpha){/*{{{*/
+
+	for(int i=0;i<this->M*this->N;i++) this->values[i] = alpha*this->values[i];
+	for(int i=0;i<PentaRef::NumberofNodes(this->interpolation);i++) this->element_values[i] = alpha*this->element_values[i];
+}
+/*}}}*/
+void PentaInput2::AXPY(Input2* xinput,IssmDouble alpha){/*{{{*/
+
+	/*xinput is of the same type, so cast it: */
+	if(xinput->ObjectEnum()!=PentaInput2Enum) _error_("Operation not permitted because xinput is of type " << EnumToStringx(xinput->ObjectEnum()));
+	PentaInput2* xpentainput=xDynamicCast<PentaInput2*>(xinput);
+	if(xpentainput->GetInterpolation()!=this->interpolation) _error_("Operation not permitted because xinput is of type " << EnumToStringx(xinput->ObjectEnum()));
+
+	/*Carry out the AXPY operation depending on type:*/
+	for(int i=0;i<this->M*this->N;i++) this->values[i] = alpha*xpentainput->values[i] + this->values[i];
+	for(int i=0;i<PentaRef::NumberofNodes(this->interpolation);i++) this->element_values[i] = alpha*xpentainput->element_values[i] + this->element_values[i];
+}
+/*}}}*/
+
+/*Object functions*/
Index: /issm/trunk/src/c/classes/Inputs2/PentaInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/PentaInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/PentaInput2.h	(revision 24686)
@@ -0,0 +1,51 @@
+#ifndef _PENTAINPUT2_H_
+#define _PENTAINPUT2_H_
+
+/*Headers:*/
+#include "./ElementInput2.h"
+#include "../Elements/PentaRef.h"
+
+class PentaInput2: public ElementInput2, public PentaRef{
+
+	private:
+		int isserved_collapsed;
+	public:
+		/*PentaInput2 constructors, destructors: {{{*/
+		PentaInput2();
+		PentaInput2(int nbe_in,int nbv_in,int interp_in);
+		~PentaInput2();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Input2 *copy();
+		void    DeepEcho();
+		void    Echo();
+		int     Id();
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		/*}}}*/
+		/*PentaInput2 management: {{{*/
+		void SetInput(int interp_in,int row,IssmDouble value_in);
+		void SetInput(int interp_in,int numinds,int* rows,IssmDouble* values_in);
+		void SetInput(int interp_in,int row,int numinds,IssmDouble* values_in);
+		int  GetInterpolation();
+		void GetInputDerivativeValue(IssmDouble* derivativevalues, IssmDouble* xyz_list, Gauss* gauss);
+		void GetInputAverage(IssmDouble* pvalue);
+		IssmDouble GetInputMin();
+		IssmDouble GetInputMax();
+		IssmDouble GetInputMaxAbs();
+		PentaInput2* GetPentaInput(){return this;};
+		void GetInputValue(IssmDouble* pvalue,Gauss* gauss);
+		void Scale(IssmDouble scalar);
+		void AXPY(Input2* xinput,IssmDouble scalar);
+		void Serve(int numindices,int* indices);
+		void Serve(int row,int numindices);
+		void ServeCollapsed(int row,int state);
+		void SetServeCollapsed(int);
+		int  GetResultArraySize(void);
+		int  GetResultInterpolation(void);
+		int  GetResultNumberOfNodes(void);
+		/*}}}*/
+		void Reset(int interp_in);
+
+};
+#endif  /* _PENTAINPUT2_H_ */
Index: /issm/trunk/src/c/classes/Inputs2/SegInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/SegInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/SegInput2.cpp	(revision 24686)
@@ -0,0 +1,324 @@
+/*!\file SegInput2.c
+ * \brief: implementation of the SegInput2 object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "../classes.h"
+#include "../../shared/shared.h"
+#include "./SegInput2.h"
+
+/*SegInput2 constructors and destructor*/
+SegInput2::SegInput2(void){/*{{{*/
+
+	this->numberofelements_local = -1;
+	this->numberofvertices_local = -1;
+	this->isserved       = false;
+	this->M = -1;
+	this->N = -1;
+	this->values         = NULL;
+	this->element_values = NULL;
+
+}/*}}}*/
+SegInput2::SegInput2(int nbe_in,int nbv_in,int interp_in){/*{{{*/
+
+	_assert_(nbe_in>0);
+	_assert_(nbe_in<1e11);
+	_assert_(nbv_in>0);
+	_assert_(nbv_in<1e11);
+	this->numberofelements_local = nbe_in;
+	this->numberofvertices_local = nbv_in;
+	this->isserved       = false;
+
+	/*Reset takes care of the rest*/
+	this->Reset(interp_in);
+}/*}}}*/
+SegInput2::~SegInput2(){/*{{{*/
+	if(this->element_values) xDelete<IssmDouble>(this->element_values);
+	if(this->values)         xDelete<IssmDouble>(this->values);
+}
+/*}}}*/
+void SegInput2::Reset(int interp_in){/*{{{*/
+
+	/*Clean up*/
+	if(this->values)         xDelete<IssmDouble>(this->values);
+	if(this->element_values) xDelete<IssmDouble>(this->element_values);
+
+	/*Set interpolation*/
+	this->interpolation  = interp_in;
+
+	/*Create Sizes*/
+	if(this->interpolation==P1Enum){
+		this->M = this->numberofvertices_local;
+		this->N = 1;
+	}
+	else{
+		this->M = this->numberofelements_local;
+		this->N = SegRef::NumberofNodes(interp_in);
+	}
+
+	/*Allocate Pointers*/
+	this->values         = xNewZeroInit<IssmDouble>(this->M*this->N);
+	this->element_values = xNewZeroInit<IssmDouble>(SegRef::NumberofNodes(interp_in));
+}/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* SegInput2::copy() {/*{{{*/
+
+	SegInput2* output = new SegInput2(this->numberofelements_local,this->numberofvertices_local,this->interpolation);
+
+	xMemCpy<IssmDouble>(output->values,this->values,this->M*this->N);
+	xMemCpy<IssmDouble>(output->element_values,this->element_values,SegRef::NumberofNodes(this->interpolation));
+
+	return output;
+}
+/*}}}*/
+void SegInput2::DeepEcho(void){/*{{{*/
+	_printf_("SegInput2 Echo:\n");
+	_printf_("   interpolation: "<<EnumToStringx(this->interpolation)<<"\n");
+	_printf_("   Size:          "<<M<<"x"<<N<<"\n");
+	_printf_("   isserved:      "<<(isserved?"true":"false") << "\n");
+	if(isserved){
+		_printf_("   current values:      [ ");
+		for(int i=0;i<SegRef::NumberofNodes(this->interpolation);i++) _printf_(" "<<this->element_values[i]); /*bound was hard-coded 3 (Tria copy/paste): over-reads element_values, which holds NumberofNodes(interpolation) entries*/
+		_printf_("] ("<<EnumToStringx(this->interpolation)<<")\n");
+	}
+	printarray(this->values,this->M,this->N);
+	//_printf_(setw(15)<<"   SegInput2 "<<setw(25)<<left<<EnumToStringx(this->enum_type)<<" "<<(value?"true":"false") << "\n");
+}
+/*}}}*/
+void SegInput2::Echo(void){/*{{{*/
+	_printf_("SegInput2 Echo:\n");
+	_printf_("   interpolation: "<<EnumToStringx(this->interpolation)<<"\n");
+	_printf_("   Size:          "<<M<<"x"<<N<<"\n");
+	_printf_("   isserved:      "<<(isserved?"true":"false") << "\n");
+	if(isserved){
+		_printf_("   current values:      ");
+		_printf_("[ ");
+		for(int i=0;i<SegRef::NumberofNodes(this->interpolation);i++) _printf_(" "<<this->element_values[i]); /*bound was hard-coded 3 (Tria copy/paste): over-reads element_values, which holds NumberofNodes(interpolation) entries*/
+		_printf_("] ("<<EnumToStringx(this->interpolation)<<")\n");
+	}
+}
+/*}}}*/
+int  SegInput2::Id(void){/*{{{*/
+	return -1;
+}/*}}}*/
+void SegInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	MARSHALLING_ENUM(SegInput2Enum);
+	MARSHALLING(this->numberofelements_local);
+	MARSHALLING(this->numberofvertices_local);
+	MARSHALLING(this->interpolation);
+	MARSHALLING(this->M);
+	MARSHALLING(this->N);
+	this->isserved = false;
+	if(this->M*this->N){
+		MARSHALLING_DYNAMIC(this->values,IssmDouble,this->M*this->N);
+	}
+	else this->values = NULL;
+
+	if(marshall_direction == MARSHALLING_BACKWARD){
+		this->element_values = xNewZeroInit<IssmDouble>(SegRef::NumberofNodes(this->interpolation));
+	}
+
+}
+/*}}}*/
+int  SegInput2::ObjectEnum(void){/*{{{*/
+	return SegInput2Enum;
+}
+/*}}}*/
+
+/*SegInput2 management*/
+void SegInput2::SetInput(int interp_in,int row,IssmDouble value_in){/*{{{*/
+
+	_assert_(this);
+	_assert_(row>=0); 
+	_assert_(row<this->M); 
+	_assert_(this->N==1);
+
+	this->values[row] = value_in;
+	this->isserved = false;
+}
+/*}}}*/
+void SegInput2::SetInput(int interp_in,int numindices,int* indices,IssmDouble* values_in){/*{{{*/
+
+	_assert_(this);
+	if(interp_in==P1Enum && this->interpolation==P1Enum){
+		_assert_(this->N==1);
+		for(int i=0;i<numindices;i++){
+			int row = indices[i];
+			_assert_(row>=0); 
+			_assert_(row<this->M); 
+			this->values[row] = values_in[i];
+		}
+	}
+	else if(interp_in==P0Enum && this->interpolation==P0Enum){
+		_assert_(this->N==1);
+		for(int i=0;i<numindices;i++){
+			int row = indices[i];
+			_assert_(row>=0); 
+			_assert_(row<this->M); 
+			this->values[row] = values_in[i];
+		}
+	}
+	else if(this->interpolation!=P1Enum && interp_in==P1Enum){
+		this->Reset(interp_in);
+		for(int i=0;i<numindices;i++){
+			int row = indices[i];
+			_assert_(row>=0); 
+			_assert_(row<this->M); 
+			this->values[row] = values_in[i];
+		}
+	}
+	else{
+		_error_("Cannot convert "<<EnumToStringx(this->interpolation)<<" to "<<EnumToStringx(interp_in));
+	}
+	this->isserved = false;
+}
+/*}}}*/
+void SegInput2::SetInput(int interp_in,int row,int numindices,IssmDouble* values_in){/*{{{*/
+
+	_assert_(this);
+	if(interp_in==this->interpolation){
+		_assert_(this->N==numindices);
+	}
+	else{
+		this->Reset(interp_in);
+		_assert_(this->N==numindices);
+	}
+	for(int i=0;i<numindices;i++) this->values[row*this->N+i] = values_in[i];
+	this->isserved = false;
+}
+/*}}}*/
+void SegInput2::Serve(int numindices,int* indices){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->N==1);
+
+	for(int i=0;i<numindices;i++){
+		int row = indices[i];
+		_assert_(row>=0); 
+		_assert_(row<this->M); 
+		this->element_values[i] = this->values[row];
+	}
+
+	/*Set input as served*/
+	this->isserved = true;
+}
+/*}}}*/
+void SegInput2::Serve(int row,int numindices){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->N==numindices);
+	_assert_(row<this->M);
+	_assert_(row>=0);
+
+	for(int i=0;i<numindices;i++){
+		this->element_values[i] = this->values[row*this->N+i];
+	}
+
+	/*Set input as served*/
+	this->isserved = true;
+} /*}}}*/
+int  SegInput2::GetInterpolation(){/*{{{*/
+	return this->interpolation;
+}/*}}}*/
+void SegInput2::GetInputAverage(IssmDouble* pvalue){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int        numnodes  = this->NumberofNodes(this->interpolation);
+	IssmDouble numnodesd = reCast<IssmDouble,int>(numnodes); /*template args were swapped (reCast<int,IssmDouble>): intent is int -> IssmDouble*/
+	IssmDouble value     = 0.;
+
+	for(int i=0;i<numnodes;i++) value+=this->element_values[i];
+	value = value/numnodesd;
+
+	*pvalue=value;
+}/*}}}*/
+IssmDouble SegInput2::GetInputMin(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int        numnodes  = this->NumberofNodes(this->interpolation);
+	IssmDouble min=this->element_values[0];
+
+	for(int i=1;i<numnodes;i++){
+		if(this->element_values[i]<min) min=this->element_values[i];
+	}
+	return min;
+}/*}}}*/
+IssmDouble SegInput2::GetInputMax(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int        numnodes  = this->NumberofNodes(this->interpolation);
+	IssmDouble max=this->element_values[0];
+
+	for(int i=1;i<numnodes;i++){
+		if(this->element_values[i]>max) max=this->element_values[i];
+	}
+	return max;
+}/*}}}*/
+IssmDouble SegInput2::GetInputMaxAbs(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int        numnodes  = this->NumberofNodes(this->interpolation);
+	IssmDouble maxabs=fabs(this->element_values[0]);
+
+	for(int i=1;i<numnodes;i++){
+		if(fabs(this->element_values[i])>maxabs) maxabs=fabs(this->element_values[i]);
+	}
+	return maxabs;
+}/*}}}*/
+void SegInput2::GetInputDerivativeValue(IssmDouble* derivativevalues, IssmDouble* xyz_list, Gauss* gauss){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+	_assert_(gauss->Enum()==GaussSegEnum);
+	SegRef::GetInputDerivativeValue(derivativevalues,this->element_values,xyz_list,(GaussSeg*)gauss,this->interpolation);
+}/*}}}*/
+void SegInput2::GetInputValue(IssmDouble* pvalue,Gauss* gauss){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+	_assert_(gauss->Enum()==GaussSegEnum);
+	SegRef::GetInputValue(pvalue,this->element_values,(GaussSeg*)gauss,this->interpolation);
+}/*}}}*/
+int  SegInput2::GetResultArraySize(void){/*{{{*/
+	return 1;
+}
+/*}}}*/
+int  SegInput2::GetResultInterpolation(void){/*{{{*/
+	if(this->interpolation==P0Enum || this->interpolation==P0DGEnum){
+		return P0Enum;
+	}
+	return P1Enum;
+}/*}}}*/
+int  SegInput2::GetResultNumberOfNodes(void){/*{{{*/
+	return this->N;
+}
+/*}}}*/
+void SegInput2::Scale(IssmDouble alpha){/*{{{*/
+
+	for(int i=0;i<this->M*this->N;i++) this->values[i] = alpha*this->values[i];
+	for(int i=0;i<SegRef::NumberofNodes(this->interpolation);i++) this->element_values[i] = alpha*this->element_values[i];
+}
+/*}}}*/
+void SegInput2::AXPY(Input2* xinput,IssmDouble alpha){/*{{{*/
+
+	/*xinput is of the same type, so cast it: */
+	if(xinput->ObjectEnum()!=SegInput2Enum) _error_("Operation not permitted because xinput is of type " << EnumToStringx(xinput->ObjectEnum()));
+	SegInput2* xtriainput=xDynamicCast<SegInput2*>(xinput);
+	if(xtriainput->GetInterpolation()!=this->interpolation) _error_("Operation not permitted because xinput is of type " << EnumToStringx(xinput->ObjectEnum()));
+
+	/*Carry out the AXPY operation depending on type:*/
+	for(int i=0;i<this->M*this->N;i++) this->values[i] = alpha*xtriainput->values[i] + this->values[i];
+	for(int i=0;i<SegRef::NumberofNodes(this->interpolation);i++) this->element_values[i] = alpha*xtriainput->element_values[i] + this->element_values[i];
+}
+/*}}}*/
+
+/*Object functions*/
Index: /issm/trunk/src/c/classes/Inputs2/SegInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/SegInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/SegInput2.h	(revision 24686)
@@ -0,0 +1,47 @@
+#ifndef _SEGINPUT2_H_
+#define _SEGINPUT2_H_
+
+/*Headers:*/
+#include "./ElementInput2.h"
+#include "../Elements/SegRef.h"
+
+class SegInput2: public ElementInput2, public SegRef{
+
+	public:
+		/*SegInput2 constructors, destructors: {{{*/
+		SegInput2();
+		SegInput2(int nbe_in,int nbv_in,int interp_in);
+		~SegInput2();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Input2 *copy();
+		void    DeepEcho();
+		void    Echo();
+		int     Id();
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		/*}}}*/
+		/*SegInput2 management: {{{*/
+		void SetInput(int interp_in,int row,IssmDouble value_in);
+		void SetInput(int interp_in,int numinds,int* rows,IssmDouble* values_in);
+		void SetInput(int interp_in,int row,int numinds,IssmDouble* values_in);
+		int  GetInterpolation();
+		void GetInputDerivativeValue(IssmDouble* derivativevalues, IssmDouble* xyz_list, Gauss* gauss);
+		void GetInputAverage(IssmDouble* pvalue);
+		IssmDouble GetInputMin();
+		IssmDouble GetInputMax();
+		IssmDouble GetInputMaxAbs();
+		SegInput2* GetSegInput(){return this;};
+		void GetInputValue(IssmDouble* pvalue,Gauss* gauss);
+		void Scale(IssmDouble scalar);
+		void AXPY(Input2* xinput,IssmDouble scalar);
+		void Serve(int numindices,int* indices);
+		void Serve(int row,int numindices);
+		int  GetResultArraySize(void);
+		int  GetResultInterpolation(void);
+		int  GetResultNumberOfNodes(void);
+		/*}}}*/
+		void Reset(int interp_in);
+
+};
+#endif  /* _SEGINPUT2_H_ */
Index: /issm/trunk/src/c/classes/Inputs2/TransientInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/TransientInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/TransientInput2.cpp	(revision 24686)
@@ -0,0 +1,469 @@
+/*!\file TransientInput2.c
+ * \brief: implementation of the TransientInput2 object
+ */
+/*Headers*/
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "./TransientInput2.h"
+#include "./TriaInput2.h"
+#include "./PentaInput2.h"
+#include "../../shared/shared.h"
+#include "../Params/Parameters.h"
+
+/*TransientInput2 constructors and destructor*/
+TransientInput2::TransientInput2(){/*{{{*/
+
+	enum_type=UNDEF;
+	inputs=NULL;
+	this->numtimesteps=0;
+	this->parameters=NULL;
+	this->timesteps=NULL;
+
+	this->current_input=NULL;
+	this->current_step=-1;
+
+}
+/*}}}*/
+TransientInput2::TransientInput2(int in_enum_type,int nbe,int nbv,IssmDouble* timesin,int N){/*{{{*/
+
+	/*Set Enum*/
+	this->enum_type=in_enum_type;
+	this->numberofelements_local = nbe;
+	this->numberofvertices_local = nbv;
+
+	/*Allocate values and timesteps, and copy: */
+	_assert_(N>=0 && N<1e6);
+	this->numtimesteps=N;
+	if(N>0){
+		this->timesteps=xNew<IssmDouble>(N);
+		xMemCpy(this->timesteps,timesin,N);
+
+		this->inputs     = xNew<Input2*>(N);
+		for(int i=0;i<N;i++) this->inputs[i] = NULL;
+	}
+	else{
+		this->timesteps=0;
+		this->inputs   =0;
+	}
+	this->parameters = NULL;
+	this->current_input=NULL;
+	this->current_step=-1;
+}
+/*}}}*/
+TransientInput2::~TransientInput2(){/*{{{*/
+
+	for(int i=0;i<this->numtimesteps;i++){
+		delete this->inputs[i];
+	}
+	xDelete<Input2*>(this->inputs);
+	xDelete<IssmDouble>(this->timesteps);
+
+	if(this->current_input) delete this->current_input;
+}
+/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* TransientInput2::copy() {/*{{{*/
+
+	TransientInput2* output=NULL;
+
+	output = new TransientInput2();
+	output->enum_type=this->enum_type; output->numberofelements_local=this->numberofelements_local; output->numberofvertices_local=this->numberofvertices_local; /*mesh sizes were not copied: default ctor leaves them uninitialized, breaking Add*TimeInput on the copy*/
+	output->numtimesteps=this->numtimesteps;
+	if(this->numtimesteps>0){
+		output->timesteps=xNew<IssmDouble>(this->numtimesteps);
+		xMemCpy(output->timesteps,this->timesteps,this->numtimesteps);
+		output->inputs = xNew<Input2*>(this->numtimesteps);
+		for(int i=0;i<this->numtimesteps;i++){
+			output->inputs[i] = this->inputs[i]->copy();
+		}
+	}
+	output->parameters=this->parameters;
+
+	return output;
+}/*}}}*/
+void TransientInput2::DeepEcho(void){/*{{{*/
+
+	int i;
+
+	_printf_("TransientInput2:\n");
+	_printf_("   enum: " << this->enum_type << " (" << EnumToStringx(this->enum_type) << ")\n");
+	_printf_("   numtimesteps: " << this->numtimesteps << "\n");
+	_printf_("---inputs: \n");
+	for(i=0;i<this->numtimesteps;i++){
+		_printf_("   time: " << this->timesteps[i]<<"  ");
+		if(this->inputs[i]) this->inputs[i]->Echo();
+		else                _printf_(" NOT SET! \n");
+	}
+}
+/*}}}*/
+void TransientInput2::Configure(Parameters* params){/*{{{*/
+	this->parameters=params;
+}
+/*}}}*/
+void TransientInput2::Echo(void){/*{{{*/
+	this->DeepEcho();
+}
+/*}}}*/
+int  TransientInput2::Id(void){ return -1; }/*{{{*/
+/*}}}*/
+void TransientInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	if (marshall_direction == MARSHALLING_BACKWARD){
+		_error_("not implemented");
+		//inputs = new Inputs();
+	}
+
+	MARSHALLING_ENUM(TransientInput2Enum);
+
+	MARSHALLING(enum_type);
+	MARSHALLING(numtimesteps);
+	MARSHALLING_DYNAMIC(this->timesteps,IssmDouble,numtimesteps);
+	//inputs->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+	_error_("not implemented");
+}
+/*}}}*/
+int  TransientInput2::ObjectEnum(void){/*{{{*/
+
+	return TransientInput2Enum;
+
+}
+/*}}}*/
+
+/*Intermediary*/
+void TransientInput2::AddTriaTimeInput(IssmDouble time,int numindices,int* indices,IssmDouble* values_in,int interp_in){/*{{{*/
+
+
+	/*Check whether this is the last time step that we have*/
+	if(this->numtimesteps){
+		if(fabs(this->timesteps[this->numtimesteps-1]-time)<1.0e-5){
+			this->AddTriaTimeInput(this->numtimesteps-1,numindices,indices,values_in,interp_in);
+			return;
+		}
+	}
+
+	/*This is a new time step! we need to add it to the list*/
+	if(this->numtimesteps>0 && time<this->timesteps[this->numtimesteps-1]) _error_("timestep values must increase sequentially");
+
+	IssmDouble *old_timesteps = NULL;
+	Input2    **old_inputs    = NULL;
+	if (this->numtimesteps > 0){
+		old_timesteps=xNew<IssmDouble>(this->numtimesteps);
+		xMemCpy(old_timesteps,this->timesteps,this->numtimesteps);
+		xDelete<IssmDouble>(this->timesteps);
+		old_inputs=xNew<Input2*>(this->numtimesteps);
+		xMemCpy(old_inputs,this->inputs,this->numtimesteps);
+		xDelete<Input2*>(this->inputs);
+	}
+
+	this->numtimesteps=this->numtimesteps+1;
+	this->timesteps=xNew<IssmDouble>(this->numtimesteps);
+	this->inputs   = xNew<Input2*>(this->numtimesteps);
+
+	if (this->numtimesteps > 1){
+		xMemCpy(this->inputs,old_inputs,this->numtimesteps-1);
+		xMemCpy(this->timesteps,old_timesteps,this->numtimesteps-1);
+		xDelete<IssmDouble>(old_timesteps); /*explicit template argument, consistent with every other xDelete call in this file*/
+		xDelete<Input2*>(old_inputs);
+	}
+
+	/*go ahead and plug: */
+	this->timesteps[this->numtimesteps-1] = time;
+	this->inputs[this->numtimesteps-1]    = NULL;
+	this->AddTriaTimeInput(this->numtimesteps-1,numindices,indices,values_in,interp_in);
+
+}
+/*}}}*/
+void TransientInput2::AddPentaTimeInput(IssmDouble time,int numindices,int* indices,IssmDouble* values_in,int interp_in){/*{{{*/
+
+	_error_("not implemented yet, look at TransientInput2::AddTriaTimeInput");
+
+}
+/*}}}*/
+void TransientInput2::AddTriaTimeInput(int step,int numindices,int* indices,IssmDouble* values_in,int interp_in){/*{{{*/
+
+	_assert_(step>=0 && step<this->numtimesteps);
+
+	/*Create it if necessary*/
+	if(this->inputs[step]){
+		if(this->inputs[step]->ObjectEnum()!=TriaInput2Enum) _error_("cannot add Element values to a "<<EnumToStringx(this->inputs[step]->ObjectEnum()));
+	}
+	else{
+		this->inputs[step] = new TriaInput2(this->numberofelements_local,this->numberofvertices_local,interp_in);
+	}
+
+	/*Set input*/
+	TriaInput2* input = xDynamicCast<TriaInput2*>(this->inputs[step]);
+	input->SetInput(interp_in,numindices,indices,values_in);
+
+}
+/*}}}*/
+void TransientInput2::AddPentaTimeInput(int step,int numindices,int* indices,IssmDouble* values_in,int interp_in){/*{{{*/
+
+	_assert_(step>=0 && step<this->numtimesteps);
+
+	/*Create it if necessary*/
+	if(this->inputs[step]){
+		if(this->inputs[step]->ObjectEnum()!=PentaInput2Enum) _error_("cannot add Element values to a "<<EnumToStringx(this->inputs[step]->ObjectEnum()));
+	}
+	else{
+		this->inputs[step] = new PentaInput2(this->numberofelements_local,this->numberofvertices_local,interp_in);
+	}
+
+	/*Set input*/
+	PentaInput2* input = xDynamicCast<PentaInput2*>(this->inputs[step]);
+	input->SetInput(interp_in,numindices,indices,values_in);
+
+}
+/*}}}*/
+void TransientInput2::GetAllTimes(IssmDouble** ptimesteps,int* pnumtimesteps){/*{{{*/
+
+	if(ptimesteps){
+		*ptimesteps=xNew<IssmDouble>(this->numtimesteps);
+		xMemCpy(*ptimesteps,this->timesteps,this->numtimesteps);
+	}
+	if(pnumtimesteps){
+		*pnumtimesteps = this->numtimesteps;
+	}
+
+}
+/*}}}*/
+TriaInput2* TransientInput2::GetTriaInput(){/*{{{*/
+
+	IssmDouble time;
+	this->parameters->FindParam(&time,TimeEnum);
+	return this->GetTriaInput(time);
+
+}
+/*}}}*/
+TriaInput2* TransientInput2::GetTriaInput(IssmDouble time){/*{{{*/
+
+	/*Set current time input*/
+	this->SetCurrentTimeInput(time);
+	_assert_(this->current_input);
+
+	/*Cast and return*/
+	if(this->current_input->ObjectEnum()!=TriaInput2Enum){
+		_error_("Cannot return a TriaInput2");
+	}
+	return xDynamicCast<TriaInput2*>(this->current_input);
+
+}
+/*}}}*/
+TriaInput2* TransientInput2::GetTriaInput(IssmDouble start_time, IssmDouble end_time){/*{{{*/
+
+	/*Set current time input*/
+	this->SetAverageAsCurrentTimeInput(start_time,end_time);
+	_assert_(this->current_input);
+
+	/*Cast and return*/
+	if(this->current_input->ObjectEnum()!=TriaInput2Enum){
+		_error_("Cannot return a TriaInput2");
+	}
+	return xDynamicCast<TriaInput2*>(this->current_input);
+
+}
+/*}}}*/
+TriaInput2* TransientInput2::GetTriaInput(int offset){/*{{{*/
+
+	/*Check offset*/
+	if(offset<0 || offset>this->numtimesteps-1){
+		_error_("Cannot return input for offset "<<offset);
+	}
+	Input2* input = this->inputs[offset];
+
+	/*Cast and return*/
+	_assert_(input);
+	if(input->ObjectEnum()!=TriaInput2Enum) _error_("Cannot return a TriaInput2");
+	return xDynamicCast<TriaInput2*>(input);
+
+}
+/*}}}*/
+PentaInput2* TransientInput2::GetPentaInput(){/*{{{*/
+
+	IssmDouble time;
+	this->parameters->FindParam(&time,TimeEnum);
+	return this->GetPentaInput(time);
+}
+/*}}}*/
+PentaInput2* TransientInput2::GetPentaInput(IssmDouble time){/*{{{*/
+
+	/*Set current time input*/
+	this->SetCurrentTimeInput(time);
+	_assert_(this->current_input);
+
+	/*Cast and return*/
+	if(this->current_input->ObjectEnum()!=PentaInput2Enum){
+		_error_("Cannot return a PentaInput2");
+	}
+	return xDynamicCast<PentaInput2*>(this->current_input);
+
+}
+/*}}}*/
+PentaInput2* TransientInput2::GetPentaInput(int offset){/*{{{*/
+
+
+	/*Check offset*/
+	if(offset<0 || offset>this->numtimesteps-1){
+		_error_("Cannot return input for offset "<<offset);
+	}
+	Input2* input = this->inputs[offset];
+
+	/*Cast and return*/
+	if(input->ObjectEnum()!=PentaInput2Enum) _error_("Cannot return a PentaInput2");
+	return xDynamicCast<PentaInput2*>(input);
+
+}
+/*}}}*/
+void TransientInput2::SetCurrentTimeInput(IssmDouble time){/*{{{*/
+
+	/*First, recover current time from parameters: */
+	bool linear_interp;
+	this->parameters->FindParam(&linear_interp,TimesteppingInterpForcingsEnum);
+
+	/*Figure step out*/
+	int offset;
+	if(!binary_search(&offset,time,this->timesteps,this->numtimesteps)){
+		_error_("Input not found (is TransientInput sorted ?)");
+	}
+
+	if (offset==-1){
+
+		/*get values for the first time: */
+		_assert_(time<this->timesteps[0]);
+
+		/*If already processed return*/
+		if(this->current_step==0.) return;
+
+		/*Prepare input*/
+		if(this->current_input) delete this->current_input;
+		this->current_step = 0.;
+		this->current_input = this->inputs[0]->copy();
+
+	}
+	else if(offset==(this->numtimesteps-1) || !linear_interp){
+
+		/*get values for the last time: */
+		_assert_(time>=this->timesteps[offset]);
+
+		/*If already processed return*/
+		if(this->current_step==reCast<IssmDouble>(offset)) return;
+
+		/*Prepare input*/
+		if(this->current_input) delete this->current_input;
+		this->current_step  = reCast<IssmDouble>(offset);
+		this->current_input = this->inputs[offset]->copy();
+	}
+	else {
+
+		/*Interpolate */
+		_assert_(time>=this->timesteps[offset] && time<this->timesteps[offset+1]);
+
+		/*get values between two times [offset:offset+1[, Interpolate linearly*/
+		IssmDouble deltat=this->timesteps[offset+1]-this->timesteps[offset];
+		IssmDouble this_step = reCast<IssmDouble>(offset) + (time - this->timesteps[offset])/deltat;
+
+		/*If already processed return*/
+		if(this->current_step>this_step-1.e-5 && this->current_step<this_step+1.e-5) return;
+
+		/*Prepare input*/
+		if(this->current_input) delete this->current_input;
+		this->current_step = this_step;
+		IssmDouble alpha2=(time-this->timesteps[offset])/deltat;
+		IssmDouble alpha1=(1.0-alpha2);
+
+		Input2* input1=this->inputs[offset];
+		Input2* input2=this->inputs[offset+1];
+
+		this->current_input = input1->copy();
+		this->current_input->Scale(alpha1);
+		this->current_input->AXPY(input2,alpha2);
+	}
+
+}/*}}}*/
+void TransientInput2::SetAverageAsCurrentTimeInput(IssmDouble start_time,IssmDouble end_time){/*{{{*/
+
+	IssmDouble  dt;
+	IssmPDouble eps=1.0e-6;
+	IssmDouble  dtsum=0;
+	int         found,start_offset,end_offset;
+	int         averaging_method = 0;
+
+	/*go through the timesteps, and grab offset for start and end*/
+	IssmDouble temp = start_time-eps;
+	found=binary_search(&start_offset,temp,this->timesteps,this->numtimesteps);
+	if(!found) _error_("Input not found (is TransientInput sorted ?)");
+	temp = end_time+eps;
+	found=binary_search(&end_offset,temp,this->timesteps,this->numtimesteps);
+	if(!found) _error_("Input not found (is TransientInput sorted ?)");
+
+	int offset=start_offset;
+	if(this->current_input) delete this->current_input;
+	while(offset < end_offset){
+		if (offset==-1){
+			dt=this->timesteps[0]-start_time;
+			_assert_(start_time<this->timesteps[0]);
+		}
+		else if(offset==this->numtimesteps-1)dt=end_time-this->timesteps[offset];
+		else if(offset==start_offset && this->timesteps[offset]<start_time) dt=this->timesteps[offset+1]-start_time;
+		else if(offset==end_offset && this->timesteps[offset]>end_time) dt=end_time-this->timesteps[offset];
+		else dt=this->timesteps[offset+1]-this->timesteps[offset];
+		_assert_(dt>0.);
+
+		Input2* stepinput=this->inputs[offset+1];
+
+		switch(averaging_method){
+			case 0: /*Arithmetic mean*/
+				if(offset==start_offset){
+					this->current_input = stepinput->copy();
+					this->current_input->Scale(dt);
+				}
+				else{
+					this->current_input->AXPY(stepinput,dt);
+				}
+				break;
+			case 1: /*Geometric mean*/
+				_error_("Geometric not implemented yet");
+			case 2: /*Harmonic mean*/
+				_error_("Harmonic not implemented yet");
+			default:
+				_error_("averaging method is not recognised");
+		}
+		dtsum+=dt;
+		offset+=1;
+	}
+		/*Integration done, now normalize*/
+	switch(averaging_method){
+		case 0: //Arithmetic mean
+			this->current_input->Scale(1/(dtsum));
+			break;
+		case 1: /*Geometric mean*/
+			_error_("Geometric not implemented yet");
+		case 2: /*Harmonic mean*/
+			_error_("Harmonic not implemented yet");
+		default:
+			_error_("averaging method is not recognised");
+	}
+}/*}}}*/
+IssmDouble  TransientInput2::GetTimeByOffset(int offset){/*{{{*/
+	if(offset<0) offset=0;
+	_assert_(offset<this->numtimesteps);
+	return this->timesteps[offset];
+}
+/*}}}*/
+int  TransientInput2::GetTimeInputOffset(IssmDouble time){/*{{{*/
+
+	int offset;
+
+	/*go through the timesteps, and figure out which interval we
+	 * fall within. Then interpolate the values on this interval: */
+	int found=binary_search(&offset,time,this->timesteps,this->numtimesteps);
+	if(!found) _error_("Input not found (is TransientInput sorted ?)");
+
+	return offset;
+}
+/*}}}*/
Index: /issm/trunk/src/c/classes/Inputs2/TransientInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/TransientInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/TransientInput2.h	(revision 24686)
@@ -0,0 +1,65 @@
+/*! \file TransientInput2.h
+ *  \brief: header file for transientinput object
+ */
+
+#ifndef _TRANSIENTINPUT2_H_
+#define _TRANSIENTINPUT2_H_
+
+/*Headers:*/
+#include "./Input2.h"
+class Gauss;
+class Parameters;
+
+class TransientInput2: public Input2{
+
+	private:
+		int     numberofelements_local;
+		int     numberofvertices_local;
+
+	public:
+		int         enum_type;
+		int         numtimesteps;
+		Input2**    inputs;
+		IssmDouble *timesteps;
+		Parameters *parameters;     //to find current time.
+
+		IssmDouble  current_step;
+		Input2*     current_input;
+
+		/*TransientInput2 constructors, destructors: {{{*/
+		TransientInput2();
+		TransientInput2(int in_enum_type,int nbe,int nbv,IssmDouble* times,int N);
+		~TransientInput2();
+		void AddTimeInput(Input2* input,IssmDouble time); /*FIXME: remove!*/
+		void AddTriaTimeInput(IssmDouble time,int numindices,int* indices,IssmDouble* values_in,int interp_in);
+		void AddPentaTimeInput(IssmDouble time,int numindices,int* indices,IssmDouble* values_in,int interp_in);
+		void AddTriaTimeInput(int step,int numindices,int* indices,IssmDouble* values_in,int interp_in);
+		void AddPentaTimeInput(int step,int numindices,int* indices,IssmDouble* values_in,int interp_in);
+		/*}}}*/
+		/*Object virtual functions definitions:{{{*/
+		Input2* copy();
+		void    Configure(Parameters* params);
+		void    DeepEcho();
+		void    Echo();
+		int     Id();
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		/*}}}*/
+		/*TransientInput2 management:*/
+		void         GetAllTimes(IssmDouble** ptimesteps,int* pnumtimesteps);
+		TriaInput2*  GetTriaInput();
+		TriaInput2*  GetTriaInput(IssmDouble time);
+		TriaInput2*  GetTriaInput(IssmDouble start_time,IssmDouble end_time);
+		TriaInput2*  GetTriaInput(int offset);
+		PentaInput2* GetPentaInput();
+		PentaInput2* GetPentaInput(IssmDouble time);
+		PentaInput2* GetPentaInput(int offset);
+		Input2*      GetTimeInput(IssmDouble time){_error_("This should not happen!");};
+		IssmDouble   GetTimeByOffset(int offset);
+		int          GetTimeInputOffset(IssmDouble time);
+		void         SetCurrentTimeInput(IssmDouble time);
+		void         SetAverageAsCurrentTimeInput(IssmDouble start_time,IssmDouble end_time);
+		/*numerics:*/
+
+};
+#endif  /* _TRANSIENTINPUT2_H_ */
Index: /issm/trunk/src/c/classes/Inputs2/TriaInput2.cpp
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/TriaInput2.cpp	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/TriaInput2.cpp	(revision 24686)
@@ -0,0 +1,379 @@
+/*!\file TriaInput2.cpp
+ * \brief: implementation of the TriaInput2 object
+ */
+
+#ifdef HAVE_CONFIG_H
+	#include <config.h>
+#else
+#error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
+#endif
+
+#include "../classes.h"
+#include "../../shared/shared.h"
+#include "./TriaInput2.h"
+
+/*TriaInput2 constructors and destructor*/
+TriaInput2::TriaInput2(void){/*{{{*/
+
+	this->numberofelements_local = -1;
+	this->numberofvertices_local = -1;
+	this->isserved       = false;
+	this->isserved_collapsed= 0;
+	this->M = -1;
+	this->N = -1;
+	this->values         = NULL;
+	this->element_values = NULL;
+
+}/*}}}*/
+TriaInput2::TriaInput2(int nbe_in,int nbv_in,int interp_in){/*{{{*/
+
+	_assert_(nbe_in>0);
+	_assert_(nbe_in<1e11);
+	_assert_(nbv_in>0);
+	_assert_(nbv_in<1e11);
+	this->numberofelements_local = nbe_in;
+	this->numberofvertices_local = nbv_in;
+	this->isserved       = false;
+	this->isserved_collapsed = 0;
+
+	/*Reset takes care of the rest*/
+	this->Reset(interp_in);
+}/*}}}*/
+TriaInput2::~TriaInput2(){/*{{{*/
+	if(this->element_values) xDelete<IssmDouble>(this->element_values);
+	if(this->values)         xDelete<IssmDouble>(this->values);
+}
+/*}}}*/
+void TriaInput2::Reset(int interp_in){/*{{{*/
+
+	/*Clean up*/
+	if(this->values)         xDelete<IssmDouble>(this->values);
+	if(this->element_values) xDelete<IssmDouble>(this->element_values);
+
+	/*Set interpolation*/
+	this->interpolation  = interp_in;
+
+	/*Create Sizes*/
+	if(this->interpolation==P1Enum){
+		this->M = this->numberofvertices_local;
+		this->N = 1;
+	}
+	else{
+		this->M = this->numberofelements_local;
+		this->N = TriaRef::NumberofNodes(interp_in);
+	}
+
+	/*Allocate Pointers*/
+	this->values         = xNewZeroInit<IssmDouble>(this->M*this->N);
+	this->element_values = xNewZeroInit<IssmDouble>(TriaRef::NumberofNodes(interp_in));
+}/*}}}*/
+
+/*Object virtual functions definitions:*/
+Input2* TriaInput2::copy() {/*{{{*/
+
+	TriaInput2* output = new TriaInput2(this->numberofelements_local,this->numberofvertices_local,this->interpolation);
+
+	xMemCpy<IssmDouble>(output->values,this->values,this->M*this->N);
+	xMemCpy<IssmDouble>(output->element_values,this->element_values,TriaRef::NumberofNodes(this->interpolation));
+
+	return output;
+}
+/*}}}*/
+void TriaInput2::DeepEcho(void){/*{{{*/
+	_printf_("TriaInput2 Echo:\n");
+	_printf_("   interpolation: "<<EnumToStringx(this->interpolation)<<"\n");
+	_printf_("   Size:          "<<M<<"x"<<N<<"\n");
+	_printf_("   isserved:      "<<(isserved?"true":"false") << "\n");
+	_printf_("   isserved_collapsed: "<<isserved_collapsed << "\n");
+	if(isserved){
+		_printf_("   current values:      ");
+		for(int i=0;i<3;i++) _printf_(" "<<this->element_values[i]);
+		_printf_("] ("<<EnumToStringx(this->interpolation)<<")\n");
+	}
+	printarray(this->values,this->M,this->N);
+	//_printf_(setw(15)<<"   TriaInput2 "<<setw(25)<<left<<EnumToStringx(this->enum_type)<<" "<<(value?"true":"false") << "\n");
+}
+/*}}}*/
+void TriaInput2::Echo(void){/*{{{*/
+	_printf_("TriaInput2 Echo:\n");
+	_printf_("   interpolation: "<<EnumToStringx(this->interpolation)<<"\n");
+	_printf_("   Size:          "<<M<<"x"<<N<<"\n");
+	_printf_("   isserved:      "<<(isserved?"true":"false") << "\n");
+	_printf_("   isserved_collapsed: "<<isserved_collapsed << "\n");
+	if(isserved){
+		_printf_("   current values:      ");
+		_printf_("[ ");
+		for(int i=0;i<TriaRef::NumberofNodes(this->interpolation);i++) _printf_(" "<<this->element_values[i]);
+		_printf_("] ("<<EnumToStringx(this->interpolation)<<")\n");
+	}
+}
+/*}}}*/
+int  TriaInput2::Id(void){/*{{{*/
+	return -1;
+}/*}}}*/
+void TriaInput2::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	MARSHALLING_ENUM(TriaInput2Enum);
+	MARSHALLING(this->numberofelements_local);
+	MARSHALLING(this->numberofvertices_local);
+	MARSHALLING(this->interpolation);
+	MARSHALLING(this->M);
+	MARSHALLING(this->N);
+	this->isserved = false;
+	this->isserved_collapsed = 0;
+	if(this->M*this->N){
+		MARSHALLING_DYNAMIC(this->values,IssmDouble,this->M*this->N);
+	}
+	else this->values = NULL;
+
+	if(marshall_direction == MARSHALLING_BACKWARD){
+		this->element_values = xNewZeroInit<IssmDouble>(TriaRef::NumberofNodes(this->interpolation));
+	}
+
+}
+/*}}}*/
+int  TriaInput2::ObjectEnum(void){/*{{{*/
+	return TriaInput2Enum;
+}
+/*}}}*/
+
+/*TriaInput2 management*/
+void TriaInput2::SetInput(int interp_in,int row,IssmDouble value_in){/*{{{*/
+
+	_assert_(this);
+	_assert_(row>=0); 
+	_assert_(row<this->M); 
+	_assert_(this->N==1);
+
+	this->values[row] = value_in;
+	this->isserved = false;
+}
+/*}}}*/
+void TriaInput2::SetInput(int interp_in,int numindices,int* indices,IssmDouble* values_in){/*{{{*/
+
+	_assert_(this);
+	if(interp_in==P1Enum && this->interpolation==P1Enum){
+		_assert_(this->N==1);
+		for(int i=0;i<numindices;i++){
+			int row = indices[i];
+			_assert_(row>=0); 
+			_assert_(row<this->M); 
+			this->values[row] = values_in[i];
+		}
+	}
+	else if(interp_in==P0Enum && this->interpolation==P0Enum){
+		_assert_(this->N==1);
+		for(int i=0;i<numindices;i++){
+			int row = indices[i];
+			_assert_(row>=0); 
+			_assert_(row<this->M); 
+			this->values[row] = values_in[i];
+		}
+	}
+	else if(this->interpolation!=P1Enum && interp_in==P1Enum){
+		this->Reset(interp_in);
+		for(int i=0;i<numindices;i++){
+			int row = indices[i];
+			_assert_(row>=0); 
+			_assert_(row<this->M); 
+			this->values[row] = values_in[i];
+		}
+	}
+	else{
+		_error_("Cannot convert "<<EnumToStringx(this->interpolation)<<" to "<<EnumToStringx(interp_in));
+	}
+	this->isserved = false;
+}
+/*}}}*/
+void TriaInput2::SetInput(int interp_in,int row,int numindices,IssmDouble* values_in){/*{{{*/
+
+	_assert_(this);
+	if(interp_in==this->interpolation){
+		_assert_(this->N==numindices);
+	}
+	else{
+		this->Reset(interp_in);
+		_assert_(this->N==numindices);
+	}
+	for(int i=0;i<numindices;i++) this->values[row*this->N+i] = values_in[i];
+	this->isserved = false;
+}
+/*}}}*/
+void TriaInput2::Serve(int numindices,int* indices){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->N==1);
+
+	for(int i=0;i<numindices;i++){
+		int row = indices[i];
+		_assert_(row>=0); 
+		_assert_(row<this->M); 
+		this->element_values[i] = this->values[row];
+	}
+
+	/*Set input as served*/
+	this->isserved = true;
+	this->isserved_collapsed = 0;
+}
+/*}}}*/
+void TriaInput2::Serve(int row,int numindices){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->N==numindices);
+	_assert_(row<this->M);
+	_assert_(row>=0);
+
+	for(int i=0;i<numindices;i++){
+		this->element_values[i] = this->values[row*this->N+i];
+	}
+
+	/*Set input as served*/
+	this->isserved = true;
+	this->isserved_collapsed = 0;
+} /*}}}*/
+void TriaInput2::ServeCollapsed(int row,int id1,int id2){/*{{{*/
+
+	_assert_(this);
+	_assert_(this->N>=3);
+	_assert_(row<this->M);
+	_assert_(row>=0);
+	_assert_(id1>=0 && id1<3);
+	_assert_(id2>=0 && id2<3);
+
+	this->element_values[0] = this->values[row*this->N+id1];
+	this->element_values[1] = this->values[row*this->N+id2];
+
+	/*Set input as served*/
+	this->isserved = true;
+	this->isserved_collapsed = 1;
+}/*}}}*/
+void TriaInput2::SetServeCollapsed(bool status){/*{{{*/
+	this->isserved_collapsed = 1;
+}/*}}}*/
+int  TriaInput2::GetInterpolation(){/*{{{*/
+	return this->interpolation;
+}/*}}}*/
+void TriaInput2::GetInputAverage(IssmDouble* pvalue){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int        numnodes  = this->NumberofNodes(this->interpolation);
+	if(this->isserved_collapsed) numnodes = 2;
+	IssmDouble numnodesd = reCast<int,IssmDouble>(numnodes);
+	IssmDouble value     = 0.;
+
+	for(int i=0;i<numnodes;i++) value+=this->element_values[i];
+	value = value/numnodesd;
+
+	*pvalue=value;
+}/*}}}*/
+IssmDouble TriaInput2::GetInputMin(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int        numnodes  = this->NumberofNodes(this->interpolation);
+	if(this->isserved_collapsed) numnodes = 2;
+	IssmDouble min=this->element_values[0];
+
+	for(int i=1;i<numnodes;i++){
+		if(this->element_values[i]<min) min=this->element_values[i];
+	}
+	return min;
+}/*}}}*/
+IssmDouble TriaInput2::GetInputMax(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int        numnodes  = this->NumberofNodes(this->interpolation);
+	if(this->isserved_collapsed) numnodes = 2;
+	IssmDouble max=this->element_values[0];
+
+	for(int i=1;i<numnodes;i++){
+		if(this->element_values[i]>max) max=this->element_values[i];
+	}
+	return max;
+}/*}}}*/
+IssmDouble TriaInput2::GetInputMaxAbs(void){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	int        numnodes  = this->NumberofNodes(this->interpolation);
+	if(this->isserved_collapsed) numnodes = 2;
+	IssmDouble maxabs=fabs(this->element_values[0]);
+
+	for(int i=1;i<numnodes;i++){
+		if(fabs(this->element_values[i])>maxabs) maxabs=fabs(this->element_values[i]);
+	}
+	return maxabs;
+}/*}}}*/
+void TriaInput2::GetInputDerivativeValue(IssmDouble* derivativevalues, IssmDouble* xyz_list, Gauss* gauss){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+
+	if(this->isserved_collapsed){
+		_assert_(gauss->Enum()==GaussSegEnum);
+		if(this->interpolation==P0Enum){
+			derivativevalues[0] = 0.;
+		}
+		else{
+			SegRef temp;
+			temp.GetInputDerivativeValue(derivativevalues,this->element_values,xyz_list,(GaussSeg*)gauss,P1Enum);
+		}
+	}
+	else{
+		_assert_(gauss->Enum()==GaussTriaEnum);
+		TriaRef::GetInputDerivativeValue(derivativevalues,this->element_values,xyz_list,(GaussTria*)gauss,this->interpolation);
+	}
+}/*}}}*/
+void TriaInput2::GetInputValue(IssmDouble* pvalue,Gauss* gauss){/*{{{*/
+	_assert_(this);
+	_assert_(this->isserved);
+	if(this->isserved_collapsed){
+		_assert_(gauss->Enum()==GaussSegEnum);
+		if(this->interpolation==P0Enum){
+			*pvalue = this->element_values[0];
+		}
+		else{
+			SegRef temp;
+			temp.GetInputValue(pvalue,this->element_values,(GaussSeg*)gauss,P1Enum);
+		}
+	}
+	else{
+		_assert_(gauss->Enum()==GaussTriaEnum);
+		TriaRef::GetInputValue(pvalue,this->element_values,(GaussTria*)gauss,this->interpolation);
+	}
+}/*}}}*/
+int  TriaInput2::GetResultArraySize(void){/*{{{*/
+	return 1;
+}
+/*}}}*/
+int  TriaInput2::GetResultInterpolation(void){/*{{{*/
+	if(this->interpolation==P0Enum || this->interpolation==P0DGEnum){
+		return P0Enum;
+	}
+	return P1Enum;
+}/*}}}*/
+int  TriaInput2::GetResultNumberOfNodes(void){/*{{{*/
+	return this->N;
+}
+/*}}}*/
+void TriaInput2::Scale(IssmDouble alpha){/*{{{*/
+
+	for(int i=0;i<this->M*this->N;i++) this->values[i] = alpha*this->values[i];
+	for(int i=0;i<TriaRef::NumberofNodes(this->interpolation);i++) this->element_values[i] = alpha*this->element_values[i];
+}
+/*}}}*/
+void TriaInput2::AXPY(Input2* xinput,IssmDouble alpha){/*{{{*/
+
+	/*xinput is of the same type, so cast it: */
+	if(xinput->ObjectEnum()!=TriaInput2Enum) _error_("Operation not permitted because xinput is of type " << EnumToStringx(xinput->ObjectEnum()));
+	TriaInput2* xtriainput=xDynamicCast<TriaInput2*>(xinput);
+	if(xtriainput->GetInterpolation()!=this->interpolation) _error_("Operation not permitted because xinput is of type " << EnumToStringx(xinput->ObjectEnum()));
+
+	/*Carry out the AXPY operation depending on type:*/
+	for(int i=0;i<this->M*this->N;i++) this->values[i] = alpha*xtriainput->values[i] + this->values[i];
+	for(int i=0;i<TriaRef::NumberofNodes(this->interpolation);i++) this->element_values[i] = alpha*xtriainput->element_values[i] + this->element_values[i];
+}
+/*}}}*/
+
+/*Object functions*/
Index: /issm/trunk/src/c/classes/Inputs2/TriaInput2.h
===================================================================
--- /issm/trunk/src/c/classes/Inputs2/TriaInput2.h	(revision 24686)
+++ /issm/trunk/src/c/classes/Inputs2/TriaInput2.h	(revision 24686)
@@ -0,0 +1,52 @@
+#ifndef _TRIAINPUT2_H_
+#define _TRIAINPUT2_H_
+
+/*Headers:*/
+#include "./ElementInput2.h"
+#include "../Elements/TriaRef.h"
+
+class TriaInput2: public ElementInput2, public TriaRef{
+
+	private:
+		int isserved_collapsed;
+		int collapsed_ids[2];
+	public:
+		/*TriaInput2 constructors, destructors: {{{*/
+		TriaInput2();
+		TriaInput2(int nbe_in,int nbv_in,int interp_in);
+		~TriaInput2();
+		/*}}}*/
+		/*Object virtual functions definitions:{{{ */
+		Input2 *copy();
+		void    DeepEcho();
+		void    Echo();
+		int     Id();
+		void    Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+		int     ObjectEnum();
+		/*}}}*/
+		/*TriaInput2 management: {{{*/
+		void SetInput(int interp_in,int row,IssmDouble value_in);
+		void SetInput(int interp_in,int numinds,int* rows,IssmDouble* values_in);
+		void SetInput(int interp_in,int row,int numinds,IssmDouble* values_in);
+		int  GetInterpolation();
+		void GetInputDerivativeValue(IssmDouble* derivativevalues, IssmDouble* xyz_list, Gauss* gauss);
+		void GetInputAverage(IssmDouble* pvalue);
+		IssmDouble GetInputMin();
+		IssmDouble GetInputMax();
+		IssmDouble GetInputMaxAbs();
+		TriaInput2* GetTriaInput(){return this;};
+		void GetInputValue(IssmDouble* pvalue,Gauss* gauss);
+		void Scale(IssmDouble scalar);
+		void AXPY(Input2* xinput,IssmDouble scalar);
+		void Serve(int numindices,int* indices);
+		void Serve(int row,int numindices);
+		void ServeCollapsed(int row,int id0,int in1);
+		void SetServeCollapsed(bool);
+		int  GetResultArraySize(void);
+		int  GetResultInterpolation(void);
+		int  GetResultNumberOfNodes(void);
+		/*}}}*/
+		void Reset(int interp_in);
+
+};
+#endif  /* _TRIAINPUT2_H_ */
Index: /issm/trunk/src/c/classes/IoModel.cpp
===================================================================
--- /issm/trunk/src/c/classes/IoModel.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/IoModel.cpp	(revision 24686)
@@ -141,4 +141,5 @@
 	this->my_hedges=NULL;
 	this->my_vertices=NULL;
+	this->my_vertices_lids=NULL;
 	this->epart=NULL;
 
@@ -206,12 +207,13 @@
 
 	/*Initialize permanent data: */
-	this->my_elements = NULL;
-	this->my_faces    = NULL;
-	this->my_vfaces   = NULL;
-	this->my_edges    = NULL;
-	this->my_vedges   = NULL;
-	this->my_hedges   = NULL;
-	this->my_vertices = NULL;
-	this->epart       = NULL;
+	this->my_elements      = NULL;
+	this->my_faces         = NULL;
+	this->my_vfaces        = NULL;
+	this->my_edges         = NULL;
+	this->my_vedges        = NULL;
+	this->my_hedges        = NULL;
+	this->my_vertices      = NULL;
+	this->my_vertices_lids = NULL;
+	this->epart            = NULL;
 
 	FindConstant(&this->domaintype,"md.mesh.domain_type");
@@ -264,4 +266,5 @@
 	xDelete<bool>(this->my_hedges);
 	xDelete<bool>(this->my_vertices);
+	xDelete<int>(this->my_vertices_lids);
 	xDelete<int>(this->epart);
 
@@ -1594,5 +1597,5 @@
 }
 /*}}}*/
-void  IoModel::FetchDataToInput(Elements* elements,const char* vector_name,int input_enum,IssmDouble default_value){/*{{{*/
+void  IoModel::FetchDataToInput(Inputs2* inputs2,Elements* elements,const char* vector_name,int input_enum,IssmDouble default_value){/*{{{*/
 
 	/*First, look whether it is not already loaded in this->data*/
@@ -1604,5 +1607,5 @@
 			for(int i=0;i<elements->Size();i++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-				element->InputCreate(iodata->data,this,iodata->M,iodata->N,iodata->layout,input_enum,iodata->code);//we need i to index into elements.
+				element->InputCreate(iodata->data,inputs2,this,iodata->M,iodata->N,iodata->layout,input_enum,iodata->code);//we need i to index into elements.
 			}
 			return;
@@ -1625,6 +1628,10 @@
 	for(int i=0;i<elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-		if(!doublearray) element->AddInput(input_enum,&default_value,P0Enum);
-		else             element->InputCreate(doublearray,this,M,N,vector_layout,input_enum,code);//we need i to index into elements.
+		if(!doublearray){
+			element->SetElementInput(inputs2,input_enum,default_value);
+		}
+		else{
+			element->InputCreate(doublearray,inputs2,this,M,N,vector_layout,input_enum,code);//we need i to index into elements.
+		}
 	}
 
@@ -1633,5 +1640,5 @@
 }
 /*}}}*/
-void  IoModel::FetchDataToInput(Elements* elements,const char* vector_name,int input_enum){/*{{{*/
+void  IoModel::FetchDataToInput(Inputs2* inputs2,Elements* elements,const char* vector_name,int input_enum){/*{{{*/
 
 	/*First, look whether it is not already loaded in this->data*/
@@ -1642,5 +1649,5 @@
 			for(int i=0;i<elements->Size();i++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-				element->InputCreate(iodata->data,this,iodata->M,iodata->N,iodata->layout,input_enum,iodata->code);//we need i to index into elements.
+				element->InputCreate(iodata->data,inputs2,this,iodata->M,iodata->N,iodata->layout,input_enum,iodata->code);//we need i to index into elements.
 			}
 			return;
@@ -1656,5 +1663,4 @@
 	int         integer;
 	IssmDouble  scalar;
-	char       *string           = NULL;
 	IssmDouble *doublearray = NULL;
 	int         M,N;
@@ -1668,5 +1674,5 @@
 			for(i=0;i<elements->Size();i++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-				element->InputUpdateFromConstant(boolean,input_enum);
+				element->SetBoolInput(inputs2,input_enum,boolean);
 			}
 			break;
@@ -1675,5 +1681,5 @@
 			for(i=0;i<elements->Size();i++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-				element->InputUpdateFromConstant(integer,input_enum);
+				element->SetIntInput(inputs2,input_enum,integer);
 			}
 			break;
@@ -1682,5 +1688,5 @@
 			for(i=0;i<elements->Size();i++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-				element->InputUpdateFromConstant(scalar,input_enum);
+				element->SetElementInput(inputs2,input_enum,scalar);
 			}
 			break;
@@ -1690,5 +1696,5 @@
 			for(i=0;i<elements->Size();i++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-				element->InputCreate(doublearray,this,M,N,vector_layout,input_enum,code);//we need i to index into elements.
+				element->InputCreate(doublearray,inputs2,this,M,N,vector_layout,input_enum,code);//we need i to index into elements.
 			}
 			break;
@@ -1698,5 +1704,5 @@
 			for(i=0;i<elements->Size();i++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-				element->InputCreate(doublearray,this,M,N,vector_layout,input_enum,code);//we need i to index into elements.
+				element->InputCreate(doublearray,inputs2,this,M,N,vector_layout,input_enum,code);//we need i to index into elements.
 			}
 			break;
@@ -1707,5 +1713,5 @@
 			for(i=0;i<elements->Size();i++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-				element->InputCreate(doublearray,this,M,N,vector_layout,input_enum,code);//we need i to index into elements.
+				element->InputCreate(doublearray,inputs2,this,M,N,vector_layout,input_enum,code);//we need i to index into elements.
 			}
 			break;
@@ -1716,5 +1722,67 @@
 	/*Free ressources*/
 	xDelete<IssmDouble>(doublearray);
-	xDelete<char>(string);
+}
+/*}}}*/
+void  IoModel::FetchDataToDatasetInput(Inputs2* inputs2,Elements* elements,const char* vector_name,int input_enum){/*{{{*/
+
+	/*First, look whether it is not already loaded in this->data*/
+	vector<IoData*>::iterator iter;
+	for(iter=data.begin();iter<data.end();iter++){
+		IoData* iodata=*iter;
+		if(strcmp(iodata->name,vector_name)==0){
+			for(int i=0;i<elements->Size();i++){
+				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
+				_error_("to be implemented...");
+				//element->InputCreate(iodata->data,inputs2,this,iodata->M,iodata->N,iodata->layout,input_enum,iodata->code);//we need i to index into elements.
+			}
+			return;
+		}
+	}
+
+	/*intermediary: */
+	int         code,vector_layout;
+	IssmDouble *doublearray = NULL;
+	int         M,N;
+
+	/*First of, find the record for the name, and get code  of data type: */
+	this->SetFilePointerToData(&code,&vector_layout,vector_name);
+
+	switch(code){
+		case 1: //boolean constant
+			_error_("not implemented yet");
+			break;
+		case 2: //integer constant
+			_error_("not implemented yet");
+			break;
+		case 3: //IssmDouble constant
+			_error_("not implemented yet");
+			break;
+		case 5: //boolean vector
+			_error_("not implemented yet");
+			break;
+		case 6: //int vector
+			_error_("not implemented yet");
+			break;
+		case 7: //IssmDouble vector
+			  {
+			this->FetchData(&doublearray,&M,&N,vector_name);
+			if(!doublearray) _error_("\""<<vector_name<<"\" not found in binary file");
+
+			int* ids = xNew<int>(N);
+			for(int i=0;i<N;i++) ids[i] = i;
+
+			for(int i=0;i<elements->Size();i++){
+				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
+				element->DatasetInputCreate(doublearray,M,N,ids,N,inputs2,this,input_enum);
+			}
+			xDelete<int>(ids);
+			  }
+			break;
+		default:
+			_error_("data code " << code << " not supported yet (detected while processing \""<<vector_name<<"\")");
+			break;
+	}
+	/*Free resources*/
+	xDelete<IssmDouble>(doublearray);
 }
 /*}}}*/
@@ -2592,5 +2660,5 @@
 	}
 	ISSM_MPI_Bcast(&found,1,ISSM_MPI_INT,0,IssmComm::GetComm());
-	if(!found)_error_("could not find data with name \"" << data_name << "\" in binary file");
+	if(!found) _error_("could not find data with name \"" << data_name << "\" in binary file");
 
 	/*Broadcast code and vector type: */
Index: /issm/trunk/src/c/classes/IoModel.h
===================================================================
--- /issm/trunk/src/c/classes/IoModel.h	(revision 24685)
+++ /issm/trunk/src/c/classes/IoModel.h	(revision 24686)
@@ -13,6 +13,7 @@
 class Parameters;
 class Elements;
+class Inputs2;
 class Param;
-class Option;
+class Options;
 
 class IoConstant { /*holds single IssmDouble, int, bool and char from input*/
@@ -70,4 +71,5 @@
 		bool *my_hedges;
 		bool *my_vertices;
+		int  *my_vertices_lids;
 		int  *epart;
 
@@ -137,6 +139,7 @@
 		void        FetchData(Options *options,const char* data_name);
 		void        FetchData(int num,...);
-		void        FetchDataToInput(Elements* elements,const char* vector_name,int input_enum);
-		void        FetchDataToInput(Elements* elements,const char* vector_name,int input_enum,IssmDouble default_value);
+		void        FetchDataToInput(Inputs2* inputs2,Elements* elements,const char* vector_name,int input_enum);
+		void        FetchDataToInput(Inputs2* inputs2,Elements* elements,const char* vector_name,int input_enum,IssmDouble default_value);
+		void        FetchDataToDatasetInput(Inputs2* inputs2,Elements* elements,const char* vector_name,int input_enum);
 		void        FetchIndependent(const char* dependent_name);
 		void        FetchMultipleData(char***   pstringarray,int* pnumstrings,const char* data_name);
Index: /issm/trunk/src/c/classes/Loads/Channel.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Channel.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Loads/Channel.cpp	(revision 24686)
@@ -387,11 +387,11 @@
 	IssmDouble c_t       = element->FindParam(HydrologyPressureMeltCoefficientEnum);
 
-	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);_assert_(h_input);
-	Input* H_input      = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
-	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum); _assert_(ks_input);
-	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	Input2* h_input      = element->GetInput2(HydrologySheetThicknessEnum);_assert_(h_input);
+	Input2* H_input      = element->GetInput2(ThicknessEnum); _assert_(H_input);
+	Input2* b_input      = element->GetInput2(BedEnum); _assert_(b_input);
+	Input2* B_input      = element->GetInput2(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input2* n_input      = element->GetInput2(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input2* ks_input     = element->GetInput2(HydrologySheetConductivityEnum); _assert_(ks_input);
+	Input2* phi_input    = element->GetInput2(HydraulicPotentialEnum);         _assert_(phi_input);
 
 	/*Get tangent vector*/
@@ -518,11 +518,11 @@
 	IssmDouble c_t       = element->FindParam(HydrologyPressureMeltCoefficientEnum);
 
-	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);_assert_(h_input);
-	Input* H_input      = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
-	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum); _assert_(ks_input);
-	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	Input2* h_input      = element->GetInput2(HydrologySheetThicknessEnum);_assert_(h_input);
+	Input2* H_input      = element->GetInput2(ThicknessEnum); _assert_(H_input);
+	Input2* b_input      = element->GetInput2(BedEnum); _assert_(b_input);
+	Input2* B_input      = element->GetInput2(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input2* n_input      = element->GetInput2(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input2* ks_input     = element->GetInput2(HydrologySheetConductivityEnum); _assert_(ks_input);
+	Input2* phi_input    = element->GetInput2(HydraulicPotentialEnum);         _assert_(phi_input);
 
 	/*Get tangent vector*/
@@ -630,11 +630,11 @@
 	IssmDouble dt        = element->FindParam(TimesteppingTimeStepEnum);
 
-	Input* h_input      = element->GetInput(HydrologySheetThicknessEnum);_assert_(h_input);
-	Input* H_input      = element->GetInput(ThicknessEnum); _assert_(H_input);
-	Input* b_input      = element->GetInput(BedEnum); _assert_(b_input);
-	Input* B_input      = element->GetInput(MaterialsRheologyBEnum);         _assert_(B_input);
-	Input* n_input      = element->GetInput(MaterialsRheologyNEnum);         _assert_(n_input);
-	Input* ks_input     = element->GetInput(HydrologySheetConductivityEnum); _assert_(ks_input);
-	Input* phi_input    = element->GetInput(HydraulicPotentialEnum);         _assert_(phi_input);
+	Input2* h_input      = element->GetInput2(HydrologySheetThicknessEnum);_assert_(h_input);
+	Input2* H_input      = element->GetInput2(ThicknessEnum); _assert_(H_input);
+	Input2* b_input      = element->GetInput2(BedEnum); _assert_(b_input);
+	Input2* B_input      = element->GetInput2(MaterialsRheologyBEnum);         _assert_(B_input);
+	Input2* n_input      = element->GetInput2(MaterialsRheologyNEnum);         _assert_(n_input);
+	Input2* ks_input     = element->GetInput2(HydrologySheetConductivityEnum); _assert_(ks_input);
+	Input2* phi_input    = element->GetInput2(HydraulicPotentialEnum);         _assert_(phi_input);
 
 	/*Get tangent vector*/
Index: /issm/trunk/src/c/classes/Loads/Friction.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Friction.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Loads/Friction.cpp	(revision 24686)
@@ -74,6 +74,6 @@
 
 	/*Recover parameters: */
-	element->GetInputValue(&q_exp,FrictionQEnum);
-	element->GetInputValue(&C_param,FrictionCEnum);
+	element->GetInputValue(&q_exp,gauss,FrictionQEnum);
+	element->GetInputValue(&C_param,gauss,FrictionCEnum);
 	element->GetInputValue(&As,gauss,FrictionAsEnum);
 	element->GetInputValue(&n,gauss,MaterialsRheologyNEnum);
@@ -142,6 +142,6 @@
 
 	/*Recover parameters: */
-	element->GetInputValue(&drag_p,FrictionPEnum);
-	element->GetInputValue(&drag_q,FrictionQEnum);
+	element->GetInputValue(&drag_p,gauss,FrictionPEnum);
+	element->GetInputValue(&drag_q,gauss,FrictionQEnum);
 	element->GetInputValue(&drag_coefficient, gauss,FrictionCoefficientEnum);
 
@@ -224,6 +224,6 @@
 
 	/*Recover parameters: */
-	element->GetInputValue(&drag_p,FrictionPEnum);
-	element->GetInputValue(&drag_q,FrictionQEnum);
+	element->GetInputValue(&drag_p,gauss,FrictionPEnum);
+	element->GetInputValue(&drag_q,gauss,FrictionQEnum);
 	element->GetInputValue(&thickness, gauss,ThicknessEnum);
 	element->GetInputValue(&base, gauss,BaseEnum);
@@ -284,6 +284,6 @@
 
 	/*Recover parameters: */
-	element->GetInputValue(&q_exp,FrictionQEnum);
-	element->GetInputValue(&C_param,FrictionCEnum);
+	element->GetInputValue(&q_exp,gauss,FrictionQEnum);
+	element->GetInputValue(&C_param,gauss,FrictionCEnum);
 	element->GetInputValue(&As,gauss,FrictionAsEnum);
 	element->GetInputValue(&n,gauss,MaterialsRheologyNEnum);
@@ -379,6 +379,6 @@
 
 	/*Intermediaries: */
-	IssmDouble  T,Tpmp,deltaT,deltaTref,pressure;
-	IssmDouble  alpha2,time,gamma;
+	IssmDouble  T,Tpmp,deltaT,deltaTref,pressure,diff,drag_coefficient;
+	IssmDouble  alpha2,time,gamma,ref,alp_new,alphascaled;
 	const IssmDouble yts = 365*24*3600.;
 
@@ -388,4 +388,9 @@
 	/*Get delta Refs*/
 	element->GetInputValue(&deltaTref,gauss,FrictionPressureAdjustedTemperatureEnum);
+	element->GetInputValue(&drag_coefficient, gauss,FrictionCoefficientEnum);
+	/*New*/
+	/*element->GetInputValue(&deltaTrefsfc,gauss,FrictionSurfaceTemperatureEnum);
+	 *    element->GetInputValue(&Tpdd,gauss,TemperaturePDDEnum);
+	 *       */
 
 	/*Compute delta T*/
@@ -395,20 +400,19 @@
 	deltaT = T-Tpmp;
 
+
 	/*Compute gamma*/
 	element->parameters->FindParam(&time,TimeEnum);
 	element->parameters->FindParam(&gamma,FrictionGammaEnum);
-	//if(time<25e3*yts){
-	//	gamma = 10.;
-	//}
-	//else{
-	//	gamma = 5.;
-	//}
-	//gamma = 5.;
-
-	/*Compute scaling parameter*/
-	alpha2 = alpha2 * exp((deltaTref - deltaT)/(2*gamma));
-
-	/*Assign output pointers:*/
-	*palpha2=alpha2;
+
+	ref = exp(deltaTref/gamma);
+	alp_new = ref/exp(deltaT/gamma);
+
+	alphascaled = sqrt(alp_new)*drag_coefficient;
+	if (alphascaled > 300) alp_new = (300/drag_coefficient)*(300/drag_coefficient);
+
+	alp_new=alp_new*alpha2;
+
+	/*Assign output pointers:*/
+	*palpha2=alp_new;
 }/*}}}*/
 void Friction::GetAlpha2Viscous(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
@@ -424,6 +428,6 @@
 
 	/*Recover parameters: */
-	element->GetInputValue(&drag_p,FrictionPEnum);
-	element->GetInputValue(&drag_q,FrictionQEnum);
+	element->GetInputValue(&drag_p,gauss,FrictionPEnum);
+	element->GetInputValue(&drag_q,gauss,FrictionQEnum);
 	element->GetInputValue(&drag_coefficient, gauss,FrictionCoefficientEnum);
 
@@ -458,6 +462,6 @@
 	/*Recover parameters: */
 	element->parameters->FindParam(&F,FrictionFEnum);
-	element->GetInputValue(&drag_p,FrictionPEnum);
-	element->GetInputValue(&drag_q,FrictionQEnum);
+	element->GetInputValue(&drag_p,gauss,FrictionPEnum);
+	element->GetInputValue(&drag_q,gauss,FrictionQEnum);
 	element->GetInputValue(&thickness, gauss,ThicknessEnum);
 	element->GetInputValue(&base, gauss,BaseEnum);
@@ -499,5 +503,5 @@
 	/*Recover parameters: */
 	element->GetInputValue(&C,gauss,FrictionCEnum);
-	element->GetInputValue(&m,FrictionMEnum);
+	element->GetInputValue(&m,gauss,FrictionMEnum);
 
 	/*Get velocity magnitude*/
@@ -606,5 +610,5 @@
 void Friction::GetAlpha2Schoof(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
 
-	/*This routine calculates the basal friction coefficient 
+	/*This routine calculates the basal friction coefficient
 	 *
 	 *               C |u_b|^(m-1)
@@ -620,5 +624,5 @@
 	element->GetInputValue(&Cmax,gauss,FrictionCmaxEnum);
 	element->GetInputValue(&C,gauss,FrictionCEnum);
-	element->GetInputValue(&m,FrictionMEnum);
+	element->GetInputValue(&m,gauss,FrictionMEnum);
 
 	/*Get effective pressure and velocity magnitude*/
@@ -639,5 +643,5 @@
 void Friction::GetAlpha2Tsai(IssmDouble* palpha2, Gauss* gauss){/*{{{*/
 
-	/*This routine calculates the basal friction coefficient 
+	/*This routine calculates the basal friction coefficient
 	 *
 	 * alpha2= min(C |ub|^m , f N ) / |ub|
@@ -651,5 +655,5 @@
 	element->GetInputValue(&f,gauss,FrictionfEnum);
 	element->GetInputValue(&C,gauss,FrictionCEnum);
-	element->GetInputValue(&m,FrictionMEnum);
+	element->GetInputValue(&m,gauss,FrictionMEnum);
 
 	/*Get effective pressure and velocity magnitude*/
@@ -680,49 +684,60 @@
 	IssmDouble  thickness,base,sealevel;
 	IssmDouble  p_ice,p_water;
-	IssmDouble  Neff;
+	IssmDouble  Neff,Neff_limit;
 
 	/*Recover parameters: */
 	element->parameters->FindParam(&coupled_flag,FrictionCouplingEnum);
+	element->parameters->FindParam(&Neff_limit,FrictionEffectivePressureLimitEnum);
 
 	/*From base and thickness, compute effective pressure when drag is viscous, or get Neff from forcing:*/
 	switch(coupled_flag){
 		case 0:{
-					 element->GetInputValue(&thickness, gauss,ThicknessEnum);
-					 element->GetInputValue(&base, gauss,BaseEnum);
-					 element->GetInputValue(&sealevel, gauss,SealevelEnum);
-					 IssmDouble rho_water = element->FindParam(MaterialsRhoSeawaterEnum);
-					 IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
-					 IssmDouble gravity   = element->FindParam(ConstantsGEnum);
-					 p_ice   = gravity*rho_ice*thickness;
-					 p_water = rho_water*gravity*(sealevel-base);
-					 Neff = p_ice - p_water;
-				 }
-			  break;
+			element->GetInputValue(&thickness, gauss,ThicknessEnum);
+			element->GetInputValue(&base, gauss,BaseEnum);
+			element->GetInputValue(&sealevel, gauss,SealevelEnum);
+			IssmDouble rho_water = element->FindParam(MaterialsRhoSeawaterEnum);
+			IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
+			IssmDouble gravity   = element->FindParam(ConstantsGEnum);
+			p_ice   = gravity*rho_ice*thickness;
+			p_water = rho_water*gravity*(sealevel-base);
+			Neff = p_ice - p_water;
+		}
+			break;
 		case 1:{
-					 element->GetInputValue(&thickness, gauss,ThicknessEnum);
-					 IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
-					 IssmDouble gravity   = element->FindParam(ConstantsGEnum);
-					 p_ice   = gravity*rho_ice*thickness;
-					 p_water = 0.;
-					 Neff = p_ice - p_water;
-				 }
-			  break;
+			element->GetInputValue(&thickness, gauss,ThicknessEnum);
+			IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
+			IssmDouble gravity   = element->FindParam(ConstantsGEnum);
+			p_ice   = gravity*rho_ice*thickness;
+			p_water = 0.;
+			Neff = p_ice - p_water;
+		}
+			break;
 		case 2:{
-					 element->GetInputValue(&thickness, gauss,ThicknessEnum);
-					 element->GetInputValue(&base, gauss,BaseEnum);
-					 element->GetInputValue(&sealevel, gauss,SealevelEnum);
-					 IssmDouble rho_water = element->FindParam(MaterialsRhoSeawaterEnum);
-					 IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
-					 IssmDouble gravity   = element->FindParam(ConstantsGEnum);
-					 p_ice   = gravity*rho_ice*thickness;
-					 p_water = max(0.,rho_water*gravity*(sealevel-base));
-					 Neff = p_ice - p_water;
-				 }
-			  break;
-		case 3:
+			element->GetInputValue(&thickness, gauss,ThicknessEnum);
+			element->GetInputValue(&base, gauss,BaseEnum);
+			element->GetInputValue(&sealevel, gauss,SealevelEnum);
+			IssmDouble rho_water = element->FindParam(MaterialsRhoSeawaterEnum);
+			IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
+			IssmDouble gravity   = element->FindParam(ConstantsGEnum);
+			p_ice   = gravity*rho_ice*thickness;
+			p_water = max(0.,rho_water*gravity*(sealevel-base));
+			Neff = p_ice - p_water;
+		}
+			break;
+		case 3:{
 			element->GetInputValue(&Neff,gauss,FrictionEffectivePressureEnum);
-			break;
-		case 4:
+			element->GetInputValue(&thickness, gauss,ThicknessEnum);
+			IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
+			IssmDouble gravity   = element->FindParam(ConstantsGEnum);
+			p_ice   = gravity*rho_ice*thickness;
+		}
+			break;
+		case 4:{
 			element->GetInputValue(&Neff,gauss,EffectivePressureEnum);
+			element->GetInputValue(&thickness, gauss,ThicknessEnum);
+			IssmDouble rho_ice   = element->FindParam(MaterialsRhoIceEnum);
+			IssmDouble gravity   = element->FindParam(ConstantsGEnum);
+			p_ice   = gravity*rho_ice*thickness;
+		}
 			break;
 		default:
@@ -731,5 +746,5 @@
 
 	/*Make sure Neff is positive*/
-	if(Neff<0.) Neff=0.;
+	if(Neff<Neff_limit*p_ice) Neff=Neff_limit*p_ice;
 
 	/*Return effective pressure*/
Index: /issm/trunk/src/c/classes/Loads/Loads.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Loads.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Loads/Loads.cpp	(revision 24686)
@@ -19,4 +19,6 @@
 #include "../../shared/Enum/EnumDefinitions.h"
 #include "../../shared/Exceptions/exceptions.h"
+#include "../../shared/MemOps/MemOps.h"
+#include "../../shared/io/Marshalling/Marshalling.h"
 #include "./Loads.h"
 #include "./Load.h"
@@ -27,5 +29,7 @@
 /*Object constructors and destructor*/
 Loads::Loads(){/*{{{*/
-	enum_type=LoadsEnum;
+	this->enum_type=LoadsEnum;
+	this->numrifts     = 0;
+	this->numpenalties = 0;
 	return;
 }
@@ -36,16 +40,83 @@
 /*}}}*/
 
+Loads* Loads::Copy() {/*{{{*/
+
+	int num_proc = IssmComm::GetSize();
+
+	/*Copy dataset*/
+	Loads* output=new Loads();
+	output->sorted=this->sorted;
+	output->numsorted=this->numsorted;
+	output->presorted=this->presorted;
+	for(vector<Object*>::iterator obj=this->objects.begin() ; obj < this->objects.end(); obj++ ){
+		output->AddObject((*obj)->copy());
+	}
+
+	/*Build id_offsets and sorted_ids*/
+	int objsize = this->numsorted;
+	output->id_offsets=NULL;
+	output->sorted_ids=NULL;
+	if(this->sorted && objsize>0 && this->id_offsets){	
+		output->id_offsets=xNew<int>(objsize);
+		xMemCpy<int>(output->id_offsets,this->id_offsets,objsize);
+	}
+	if(this->sorted && objsize>0 && this->sorted_ids){
+		output->sorted_ids=xNew<int>(objsize);
+		xMemCpy<int>(output->sorted_ids,this->sorted_ids,objsize);
+	}
+
+	/*Copy other fields*/
+	output->numrifts = this->numrifts;
+	output->numpenalties = this->numpenalties;
+
+	return output;
+}
+/*}}}*/
+void  Loads::Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction){ /*{{{*/
+
+	int num_procs=IssmComm::GetSize();
+	int test = num_procs;
+	MARSHALLING_ENUM(LoadsEnum);
+	MARSHALLING(numrifts);
+	MARSHALLING(numpenalties);
+
+	DataSet::Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
+}
+/*}}}*/
+
 /*Numerics:*/
 void Loads::Configure(Elements* elements,Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters){/*{{{*/
 
 	vector<Object*>::iterator object;
-	Load* load=NULL;
+	for(object=objects.begin() ; object < objects.end(); object++){
+		Load* load=xDynamicCast<Load*>(*object);
+		load->Configure(elements,loads,nodes,vertices,materials,parameters);
+	}
+}
+/*}}}*/
+void Loads::Finalize(){/*{{{*/
 
-	for ( object=objects.begin() ; object < objects.end(); object++ ){
+	/*Count Rifts and penalties*/
+	int ispenalty=0;
+	int isrift=0;
+	int allcount;
 
-		load=xDynamicCast<Load*>(*object);
-		load->Configure(elements,loads,nodes,vertices,materials,parameters);
+	/*Now go through all loads, and get how many nodes they own, unless they are clone nodes: */
+	for(int i=0;i<this->Size();i++){
+		Load* load=xDynamicCast<Load*>(this->GetObjectByOffset(i));
+		if(load->IsPenalty()){
+			ispenalty++;
+		}
+      if(load->ObjectEnum()==RiftfrontEnum){
+         isrift++;
+      }
+	}
 
-	}
+	/*Grab sum of all cpus: */
+	ISSM_MPI_Allreduce((void*)&ispenalty,(void*)&allcount,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm());
+	this->numpenalties = allcount;
+
+	ISSM_MPI_Allreduce((void*)&isrift,(void*)&allcount,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm());
+	this->numrifts= allcount;
 
 }
@@ -53,18 +124,5 @@
 bool Loads::IsPenalty(){/*{{{*/
 
-	int ispenalty=0;
-	int allispenalty=0;
-
-	/*Now go through all loads, and get how many nodes they own, unless they are clone nodes: */
-	for(int i=0;i<this->Size();i++){
-		Load* load=xDynamicCast<Load*>(this->GetObjectByOffset(i));
-		if(load->IsPenalty()) ispenalty++;
-	}
-
-	/*Grab sum of all cpus: */
-	ISSM_MPI_Allreduce((void*)&ispenalty,(void*)&allispenalty,1,ISSM_MPI_INT,ISSM_MPI_SUM,IssmComm::GetComm());
-	ispenalty=allispenalty;
-
-	if(ispenalty)
+	if(this->numpenalties>0)
 	 return true;
 	else
Index: /issm/trunk/src/c/classes/Loads/Loads.h
===================================================================
--- /issm/trunk/src/c/classes/Loads/Loads.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Loads/Loads.h	(revision 24686)
@@ -18,11 +18,19 @@
 	public:
 
+		int numrifts;
+		int numpenalties;
+
 		/*constructors, destructors*/
 		Loads();
 		~Loads();
 
+		/*Objects virtual functions*/
+		Loads* Copy();
+		void   Marshall(char** pmarshalled_data,int* pmarshalled_data_size, int marshall_direction);
+
 		/*numerics*/
 		void  Configure(Elements* elements,Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters);
 		bool  IsPenalty();
+		void  Finalize();
 		int   MaxNumNodes();
 		int   NumberOfLoads();
Index: /issm/trunk/src/c/classes/Loads/Neumannflux.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Neumannflux.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Loads/Neumannflux.cpp	(revision 24686)
@@ -361,5 +361,5 @@
 	/*Retrieve all inputs and parameters*/
 	GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
-	Input* flux_input = tria->inputs->GetInput(HydrologyNeumannfluxEnum);  _assert_(flux_input); 
+	Input2* flux_input = tria->GetInput2(HydrologyNeumannfluxEnum);  _assert_(flux_input); 
 
 	/*Check wether it is an inflow or outflow BC (0 is the middle of the segment)*/
@@ -405,5 +405,5 @@
 	/*Retrieve all inputs and parameters*/
 	GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
-	Input* flux_input = tria->inputs->GetInput(HydrologyNeumannfluxEnum);  _assert_(flux_input); 
+	Input2* flux_input = tria->GetInput2(HydrologyNeumannfluxEnum);  _assert_(flux_input); 
 
 	/*Check wether it is an inflow or outflow BC (0 is the middle of the segment)*/
Index: /issm/trunk/src/c/classes/Loads/Numericalflux.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Numericalflux.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Loads/Numericalflux.cpp	(revision 24686)
@@ -516,6 +516,6 @@
 	/*Retrieve all inputs and parameters*/
 	GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
-	Input* vxaverage_input=tria->inputs->GetInput(VxEnum);
-	Input* vyaverage_input=tria->inputs->GetInput(VyEnum);
+	Input2* vxaverage_input=tria->GetInput2(VxEnum); _assert_(vxaverage_input); 
+	Input2* vyaverage_input=tria->GetInput2(VyEnum); _assert_(vyaverage_input); 
 	GetNormal(&normal[0],xyz_list);
 
@@ -591,6 +591,6 @@
 	/*Retrieve all inputs and parameters*/
 	GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
-	Input* vxaverage_input=tria->inputs->GetInput(VxEnum);
-	Input* vyaverage_input=tria->inputs->GetInput(VyEnum);
+	Input2* vxaverage_input=tria->GetInput2(VxEnum); _assert_(vxaverage_input); 
+	Input2* vyaverage_input=tria->GetInput2(VyEnum); _assert_(vyaverage_input); 
 	GetNormal(&normal[0],xyz_list);
 
@@ -688,6 +688,6 @@
 	GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
 	IssmDouble dt = parameters->FindParam(TimesteppingTimeStepEnum);
-	Input* vxaverage_input=tria->inputs->GetInput(VxEnum); _assert_(vxaverage_input);
-	Input* vyaverage_input=tria->inputs->GetInput(VyEnum); _assert_(vyaverage_input);
+	Input2* vxaverage_input=tria->GetInput2(VxEnum); _assert_(vxaverage_input);
+	Input2* vyaverage_input=tria->GetInput2(VyEnum); _assert_(vyaverage_input);
 	GetNormal(&normal[0],xyz_list);
 
@@ -764,6 +764,6 @@
 	GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
 	IssmDouble dt = parameters->FindParam(TimesteppingTimeStepEnum);
-	Input* vxaverage_input=tria->inputs->GetInput(VxEnum);
-	Input* vyaverage_input=tria->inputs->GetInput(VyEnum);
+	Input2* vxaverage_input=tria->GetInput2(VxEnum); _assert_(vxaverage_input); 
+	Input2* vyaverage_input=tria->GetInput2(VyEnum); _assert_(vyaverage_input); 
 	GetNormal(&normal[0],xyz_list);
 
@@ -866,7 +866,7 @@
 	/*Retrieve all inputs and parameters*/
 	GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
-	Input* vxaverage_input = tria->inputs->GetInput(VxEnum);        _assert_(vxaverage_input);
-	Input* vyaverage_input = tria->inputs->GetInput(VyEnum);        _assert_(vyaverage_input);
-	Input* thickness_input = tria->inputs->GetInput(ThicknessEnum); _assert_(thickness_input);
+	Input2* vxaverage_input = tria->GetInput2(VxEnum);        _assert_(vxaverage_input);
+	Input2* vyaverage_input = tria->GetInput2(VyEnum);        _assert_(vyaverage_input);
+	Input2* thickness_input = tria->GetInput2(ThicknessEnum); _assert_(thickness_input);
 	GetNormal(&normal[0],xyz_list);
 
@@ -947,7 +947,7 @@
 	GetVerticesCoordinates(&xyz_list[0][0],vertices,NUMVERTICES);
 	IssmDouble dt = parameters->FindParam(TimesteppingTimeStepEnum);
-	Input* vxaverage_input    = tria->inputs->GetInput(VxEnum);                        _assert_(vxaverage_input);
-	Input* vyaverage_input    = tria->inputs->GetInput(VyEnum);                        _assert_(vyaverage_input);
-	Input* spcthickness_input = tria->inputs->GetInput(MasstransportSpcthicknessEnum); _assert_(spcthickness_input);
+	Input2* vxaverage_input    = tria->GetInput2(VxEnum);                        _assert_(vxaverage_input);
+	Input2* vyaverage_input    = tria->GetInput2(VyEnum);                        _assert_(vyaverage_input);
+	Input2* spcthickness_input = tria->GetInput2(MasstransportSpcthicknessEnum); _assert_(spcthickness_input);
 	GetNormal(&normal[0],xyz_list);
 
Index: /issm/trunk/src/c/classes/Loads/Riftfront.cpp
===================================================================
--- /issm/trunk/src/c/classes/Loads/Riftfront.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Loads/Riftfront.cpp	(revision 24686)
@@ -592,5 +592,4 @@
 }
 /*}}}*/
-#define _ZIGZAGCOUNTER_
 int    Riftfront::Constrain(int* punstable){/*{{{*/
 
Index: /issm/trunk/src/c/classes/Materials/Material.h
===================================================================
--- /issm/trunk/src/c/classes/Materials/Material.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Materials/Material.h	(revision 24686)
@@ -16,4 +16,5 @@
 class Gauss;
 class Input;
+class Input2;
 /*}}}*/
 
@@ -45,14 +46,14 @@
 		virtual void       ResetHooks()=0;
 
-		virtual void       ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input)=0;
 		virtual void       ViscosityFSDerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss)=0;
-		virtual void       ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input)=0;
 		virtual void       ViscosityHODerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss)=0;
-		virtual void       ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* surf)=0;
-		virtual void       ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input)=0;
 		virtual void       ViscositySSADerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss)=0;
-		virtual void       ViscosityBFS(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input,IssmDouble epseff)=0;
-		virtual void       ViscosityBHO(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble epseff)=0;
-		virtual void       ViscosityBSSA(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble epseff)=0;
+		virtual void       ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input)=0;
+		virtual void       ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input)=0;
+		virtual void       ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* surf)=0;
+		virtual void       ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input)=0;
+		virtual void       ViscosityBFS(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input,IssmDouble epseff)=0;
+		virtual void       ViscosityBHO(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble epseff)=0;
+		virtual void       ViscosityBSSA(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble epseff)=0;
 
 };
Index: /issm/trunk/src/c/classes/Materials/Matestar.cpp
===================================================================
--- /issm/trunk/src/c/classes/Materials/Matestar.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Materials/Matestar.cpp	(revision 24686)
@@ -11,9 +11,4 @@
 #include "./Matestar.h"
 #include "./Materials.h"
-#include "../Inputs/Input.h"
-#include "../Inputs/Inputs.h"
-#include "../Inputs/TriaInput.h"
-#include "../Inputs/PentaInput.h"
-#include "../Inputs/ControlInput.h"
 #include "../Elements/Element.h"
 #include "../Elements/Tria.h"
@@ -160,5 +155,5 @@
 	IssmDouble B;
 
-	Input* B_input = element->GetInput(MaterialsRheologyBEnum); _assert_(B_input);
+	Input2* B_input = element->GetInput2(MaterialsRheologyBEnum); _assert_(B_input);
 	B_input->GetInputValue(&B,gauss);
 	return B;
@@ -170,5 +165,5 @@
 	IssmDouble Bbar;
 
-	Input* B_input = element->GetInput(MaterialsRheologyBbarEnum); _assert_(B_input);
+	Input2* B_input = element->GetInput2(MaterialsRheologyBbarEnum); _assert_(B_input);
 	B_input->GetInputValue(&Bbar,gauss);
 	return Bbar;
@@ -189,5 +184,5 @@
 	IssmDouble Ec;
 
-	Input* Ec_input = element->GetInput(MaterialsRheologyEcEnum); _assert_(Ec_input);
+	Input2* Ec_input = element->GetInput2(MaterialsRheologyEcEnum); _assert_(Ec_input);
 	Ec_input->GetInputValue(&Ec,gauss);
 	return Ec;
@@ -199,5 +194,5 @@
 	IssmDouble Ecbar;
 
-	Input* Ecbar_input = element->GetInput(MaterialsRheologyEcbarEnum); _assert_(Ecbar_input);
+	Input2* Ecbar_input = element->GetInput2(MaterialsRheologyEcbarEnum); _assert_(Ecbar_input);
 	Ecbar_input->GetInputValue(&Ecbar,gauss);
 	return Ecbar;
@@ -209,5 +204,5 @@
 	IssmDouble Es;
 
-	Input* Es_input = element->GetInput(MaterialsRheologyEsEnum); _assert_(Es_input);
+	Input2* Es_input = element->GetInput2(MaterialsRheologyEsEnum); _assert_(Es_input);
 	Es_input->GetInputValue(&Es,gauss);
 	return Es;
@@ -219,5 +214,5 @@
 	IssmDouble Esbar;
 
-	Input* Esbar_input = element->GetInput(MaterialsRheologyEsbarEnum); _assert_(Esbar_input);
+	Input2* Esbar_input = element->GetInput2(MaterialsRheologyEsbarEnum); _assert_(Esbar_input);
 	Esbar_input->GetInputValue(&Esbar,gauss);
 	return Esbar;
@@ -388,5 +383,15 @@
 }
 /*}}}*/
-void  Matestar::ViscosityBFS(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input,IssmDouble eps_eff){/*{{{*/
+void  Matestar::ViscosityFSDerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
+	this->GetViscosityDerivativeEpsSquare(pmu_prime,epsilon,gauss);
+}/*}}}*/
+void  Matestar::ViscosityHODerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
+	_error_("not implemented yet");
+}/*}}}*/
+void  Matestar::ViscositySSADerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
+	_error_("not implemented yet");
+}/*}}}*/
+
+void  Matestar::ViscosityBFS(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input,IssmDouble eps_eff){/*{{{*/
 
 	/*Intermediaries*/
@@ -417,5 +422,5 @@
 }
 /*}}}*/
-void  Matestar::ViscosityBHO(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble eps_eff){/*{{{*/
+void  Matestar::ViscosityBHO(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble eps_eff){/*{{{*/
 
 	/*Intermediaries*/
@@ -445,5 +450,5 @@
 	*pdmudB=GetViscosity_BGeneral(vx,vy,vz,&dvx[0],&dvy[0],&dvz[0],eps_eff,isdepthaveraged,gauss);
 }/*}}}*/
-void  Matestar::ViscosityBSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble eps_eff){/*{{{*/
+void  Matestar::ViscosityBSSA(IssmDouble* pdmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble eps_eff){/*{{{*/
 	/*Intermediaries*/
 	IssmDouble vx,vy,vz;
@@ -475,5 +480,5 @@
 	*pdmudB=GetViscosity_BGeneral(vx,vy,vz,&dvx[0],&dvy[0],&dvz[0],eps_eff,isdepthaveraged,gauss);
 }/*}}}*/
-void  Matestar::ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){/*{{{*/
+void  Matestar::ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -518,8 +523,5 @@
 }
 /*}}}*/
-void  Matestar::ViscosityFSDerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
-	this->GetViscosityDerivativeEpsSquare(pmu_prime,epsilon,gauss);
-}/*}}}*/
-void  Matestar::ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void  Matestar::ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -563,11 +565,8 @@
 	*pviscosity=GetViscosityGeneral(vx,vy,vz,&dvx[0],&dvy[0],&dvz[0],eps_eff,isdepthaveraged,gauss);
 }/*}}}*/
-void  Matestar::ViscosityHODerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
-	_error_("not implemented yet");
-}/*}}}*/
-void  Matestar::ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* surface_input){/*{{{*/
-	_error_("not implemented yet");
-}/*}}}*/
-void  Matestar::ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void  Matestar::ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* surface_input){/*{{{*/
+	_error_("not implemented yet");
+}/*}}}*/
+void  Matestar::ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -614,5 +613,2 @@
 	*pviscosity=GetViscosityGeneral(vx,vy,vz,&dvx[0],&dvy[0],&dvz[0],eps_eff,isdepthaveraged,gauss);
 }/*}}}*/
-void  Matestar::ViscositySSADerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
-	_error_("not implemented yet");
-}/*}}}*/
Index: /issm/trunk/src/c/classes/Materials/Matestar.h
===================================================================
--- /issm/trunk/src/c/classes/Materials/Matestar.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Materials/Matestar.h	(revision 24686)
@@ -71,14 +71,15 @@
 		void       SetCurrentConfiguration(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters);
 
-		void       ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input);
 		void       ViscosityFSDerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss);
-		void       ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
 		void       ViscosityHODerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss);
-		void       ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* surf);
-		void       ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
 		void       ViscositySSADerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss);
-		void       ViscosityBFS(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input,IssmDouble eps_eff);
-		void       ViscosityBHO(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble eps_eff);
-		void       ViscosityBSSA(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble eps_eff);
+
+		void       ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input);
+		void       ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void       ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* surf);
+		void       ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void       ViscosityBFS(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input,IssmDouble eps_eff);
+		void       ViscosityBHO(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble eps_eff);
+		void       ViscosityBSSA(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble eps_eff);
 		/*}}}*/
 		IssmDouble GetViscosityGeneral(IssmDouble vx,IssmDouble vy,IssmDouble vz,IssmDouble* dvx,IssmDouble* dvy,IssmDouble* dvz,IssmDouble eps_eff,bool isdepthaveraged,Gauss* gauss);
Index: /issm/trunk/src/c/classes/Materials/Matice.cpp
===================================================================
--- /issm/trunk/src/c/classes/Materials/Matice.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Materials/Matice.cpp	(revision 24686)
@@ -11,9 +11,4 @@
 #include "./Matice.h"
 #include "./Materials.h"
-#include "../Inputs/Input.h"
-#include "../Inputs/Inputs.h"
-#include "../Inputs/TriaInput.h"
-#include "../Inputs/PentaInput.h"
-#include "../Inputs/ControlInput.h"
 #include "../Elements/Element.h"
 #include "../Elements/Tria.h"
@@ -221,6 +216,5 @@
 	/*Output*/
 	IssmDouble B;
-
-	Input* B_input = element->GetInput(MaterialsRheologyBEnum); _assert_(B_input);
+	Input2* B_input = element->GetInput2(MaterialsRheologyBEnum); _assert_(B_input);
 	B_input->GetInputValue(&B,gauss);
 	return B;
@@ -234,5 +228,5 @@
 	IssmDouble Bbar;
 
-	Input* B_input = element->GetInput(MaterialsRheologyBbarEnum); _assert_(B_input);
+	Input2* B_input = element->GetInput2(MaterialsRheologyBbarEnum); _assert_(B_input);
 	B_input->GetInputValue(&Bbar,gauss);
 	return Bbar;
@@ -245,5 +239,5 @@
 	IssmDouble D;
 	if(this->isdamaged){
-		Input* D_input = element->GetInput(DamageDEnum); _assert_(D_input);
+		Input2* D_input = element->GetInput2(DamageDEnum); _assert_(D_input);
 		D_input->GetInputValue(&D,gauss);
 	}
@@ -260,5 +254,5 @@
 	IssmDouble Dbar;
 	if(this->isdamaged){
-		Input* D_input = element->GetInput(DamageDbarEnum); _assert_(D_input);
+		Input2* D_input = element->GetInput2(DamageDbarEnum); _assert_(D_input);
 		D_input->GetInputValue(&Dbar,gauss);
 	}
@@ -274,5 +268,5 @@
 	/*Output*/
 	IssmDouble E;
-	Input* E_input = element->GetInput(MaterialsRheologyEEnum); _assert_(E_input);
+	Input2* E_input = element->GetInput2(MaterialsRheologyEEnum); _assert_(E_input);
 	E_input->GetInputValue(&E,gauss);
 	return E;
@@ -284,5 +278,5 @@
 	/*Output*/
 	IssmDouble Ebar;
-	Input* E_input = element->GetInput(MaterialsRheologyEbarEnum); _assert_(E_input);
+	Input2* E_input = element->GetInput2(MaterialsRheologyEbarEnum); _assert_(E_input);
 	E_input->GetInputValue(&Ebar,gauss);
 	return Ebar;
@@ -293,6 +287,6 @@
 	/*Output*/
 	IssmDouble n;
-
-	element->inputs->GetInputAverage(&n,MaterialsRheologyNEnum);
+	Input2* n_input = element->GetInput2(MaterialsRheologyNEnum); _assert_(n_input);
+	n_input->GetInputAverage(&n);
 	return n;
 }
@@ -667,5 +661,15 @@
 }
 /*}}}*/
-void  Matice::ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){/*{{{*/
+void  Matice::ViscosityFSDerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
+	this->GetViscosityDerivativeEpsSquare(pmu_prime,epsilon,gauss);
+}/*}}}*/
+void  Matice::ViscosityHODerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
+	this->GetViscosityDerivativeEpsSquare(pmu_prime,epsilon,gauss);
+}/*}}}*/
+void  Matice::ViscositySSADerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
+	this->GetViscosity2dDerivativeEpsSquare(pmu_prime,epsilon,gauss);
+}/*}}}*/
+
+void  Matice::ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input){/*{{{*/
 	/*The effective strain rate is defined in Paterson 3d Ed p 91 eq 9,
 	 * and Cuffey p 303 eq 8.18:
@@ -706,8 +710,5 @@
 }
 /*}}}*/
-void  Matice::ViscosityFSDerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
-	this->GetViscosityDerivativeEpsSquare(pmu_prime,epsilon,gauss);
-}/*}}}*/
-void  Matice::ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void  Matice::ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -735,8 +736,5 @@
 	*pviscosity=viscosity;
 }/*}}}*/
-void  Matice::ViscosityHODerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
-	this->GetViscosityDerivativeEpsSquare(pmu_prime,epsilon,gauss);
-}/*}}}*/
-void  Matice::ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* surface_input){/*{{{*/
+void  Matice::ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* surface_input){/*{{{*/
 	/*Compute the L1L2 viscosity
 	 *
@@ -797,5 +795,5 @@
 	*pviscosity = viscosity;
 }/*}}}*/
-void  Matice::ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){/*{{{*/
+void  Matice::ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){/*{{{*/
 
 	/*Intermediaries*/
@@ -822,5 +820,2 @@
 	*pviscosity=viscosity;
 }/*}}}*/
-void  Matice::ViscositySSADerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){/*{{{*/
-	this->GetViscosity2dDerivativeEpsSquare(pmu_prime,epsilon,gauss);
-}/*}}}*/
Index: /issm/trunk/src/c/classes/Materials/Matice.h
===================================================================
--- /issm/trunk/src/c/classes/Materials/Matice.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Materials/Matice.h	(revision 24686)
@@ -73,14 +73,15 @@
 		void       SetCurrentConfiguration(Elements* elements,Loads* loads,Nodes* nodes,Vertices* vertices,Materials* materials,Parameters* parameters);
 
-		void       ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input);
 		void       ViscosityFSDerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss);
-		void       ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
 		void       ViscosityHODerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss);
-		void       ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* surf);
-		void       ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input);
 		void       ViscositySSADerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss);
-		void       ViscosityBFS(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input,IssmDouble eps_eff){_error_("not supported");};
-		void       ViscosityBHO(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble eps_eff){_error_("not supported");};
-		void       ViscosityBSSA(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble eps_eff){_error_("not supported");};
+
+		void       ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input);
+		void       ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void       ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* surf);
+		void       ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input);
+		void       ViscosityBFS(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input,IssmDouble eps_eff){_error_("not supported");};
+		void       ViscosityBHO(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble eps_eff){_error_("not supported");};
+		void       ViscosityBSSA(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble eps_eff){_error_("not supported");};
 		/*}}}*/
 };
Index: /issm/trunk/src/c/classes/Materials/Matlitho.h
===================================================================
--- /issm/trunk/src/c/classes/Materials/Matlitho.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Materials/Matlitho.h	(revision 24686)
@@ -62,14 +62,15 @@
 		void       ResetHooks();
 
-		void       ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input){_error_("not supported");};
 		void       ViscosityFSDerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){_error_("not supported");};
-		void       ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){_error_("not supported");};
 		void       ViscosityHODerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){_error_("not supported");};
-		void       ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* surf){_error_("not supported");};
-		void       ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input){_error_("not supported");};
 		void       ViscositySSADerivativeEpsSquare(IssmDouble* pmu_prime,IssmDouble* epsilon,Gauss* gauss){_error_("not supported");};
-		void       ViscosityBFS(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,Input* vz_input,IssmDouble epseff){_error_("not supported");};
-		void       ViscosityBHO(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble epseff){_error_("not supported");};
-		void       ViscosityBSSA(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input* vx_input,Input* vy_input,IssmDouble epseff){_error_("not supported");};
+
+		void       ViscosityFS(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input){_error_("not supported");};
+		void       ViscosityHO(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){_error_("not supported");};
+		void       ViscosityL1L2(IssmDouble* pviscosity,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* surf){_error_("not supported");};
+		void       ViscositySSA(IssmDouble* pviscosity,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input){_error_("not supported");};
+		void       ViscosityBFS(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,Input2* vz_input,IssmDouble epseff){_error_("not supported");};
+		void       ViscosityBHO(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble epseff){_error_("not supported");};
+		void       ViscosityBSSA(IssmDouble* pmudB,int dim,IssmDouble* xyz_list,Gauss* gauss,Input2* vx_input,Input2* vy_input,IssmDouble epseff){_error_("not supported");};
 
 		/*}}}*/
Index: /issm/trunk/src/c/classes/Misfit.cpp
===================================================================
--- /issm/trunk/src/c/classes/Misfit.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Misfit.cpp	(revision 24686)
@@ -20,5 +20,4 @@
 #include "../modules/SurfaceAreax/SurfaceAreax.h"
 #include "../classes/Params/Parameters.h"
-#include "../classes/Inputs/Input.h"
 #include "../classes/gauss/Gauss.h"
 /*}}}*/
@@ -235,6 +234,6 @@
 		 model=OutputDefinitionsResponsex(femmodel,this->model_enum);
 		 /*Now, the observation is buried inside the elements, go fish it in the first element (cludgy, needs fixing): */
-		 Element* element=(Element*)femmodel->elements->GetObjectByOffset(0); _assert_(element);
-		 Input* input = element->GetInput(observation_enum); _assert_(input);
+		 Element* element = (Element*)femmodel->elements->GetObjectByOffset(0); _assert_(element);
+		 Input2*  input   = element->GetInput2(observation_enum); _assert_(input);
 		 input->GetInputAverage(&observation);
 
Index: /issm/trunk/src/c/classes/Nodalvalue.cpp
===================================================================
--- /issm/trunk/src/c/classes/Nodalvalue.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Nodalvalue.cpp	(revision 24686)
@@ -15,5 +15,4 @@
 #include "../modules/SurfaceAreax/SurfaceAreax.h"
 #include "../classes/Params/Parameters.h"
-#include "../classes/Inputs/Input.h"
 #include "../classes/gauss/Gauss.h"
 #include "./classes.h"
Index: /issm/trunk/src/c/classes/Nodes.cpp
===================================================================
--- /issm/trunk/src/c/classes/Nodes.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Nodes.cpp	(revision 24686)
@@ -185,5 +185,5 @@
 	 * up by their clones: */
 	int  maxdofspernode = this->MaxNumDofs(setenum);
-	int* truedofs       = xNew<int>(this->Size()*maxdofspernode); //only one alloc
+	int* truedofs       = xNewZeroInit<int>(this->Size()*maxdofspernode); //only one alloc
 	for(int rank=0;rank<num_procs;rank++){
 		if(this->common_send[rank]){
Index: /issm/trunk/src/c/classes/Params/Parameters.cpp
===================================================================
--- /issm/trunk/src/c/classes/Params/Parameters.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Params/Parameters.cpp	(revision 24686)
@@ -233,5 +233,5 @@
 			}
 			else if(obj_enum==GenericParamEnum){
-				/*Skip for now (we don't want to Marhsall Comms*/
+				/*Skip for now (we don't want to Marshall Comms)*/
 			}
 		}
Index: /issm/trunk/src/c/classes/Radar.cpp
===================================================================
--- /issm/trunk/src/c/classes/Radar.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Radar.cpp	(revision 24686)
@@ -17,5 +17,4 @@
 #include "./FemModel.h"
 #include "../classes/Params/Parameters.h"
-#include "../classes/Inputs/Input.h"
 #include "../classes/gauss/Gauss.h"
 #include "./Radar.h"
@@ -129,6 +128,6 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* temp_input=element->GetInput(TemperatureEnum); _assert_(temp_input);
-	Input* ice_period_input=element->GetInput(RadarIcePeriodEnum); _assert_(ice_period_input); 
+	Input2* temp_input=element->GetInput2(TemperatureEnum); _assert_(temp_input);
+	Input2* ice_period_input=element->GetInput2(RadarIcePeriodEnum); _assert_(ice_period_input); 
 
 	/* Start looping on the number of vertices: */
@@ -188,6 +187,6 @@
 
 		/*Add Attenuation rate results into inputs*/
-	   element->AddInput(new PentaInput(RadarAttenuationMacGregorEnum,&attenuation_rate_macgregor[0],P1Enum));
-		element->AddInput(new PentaInput(RadarAttenuationWolffEnum,&attenuation_rate_wolff[0],P1Enum));
+	   element->AddInput2(RadarAttenuationMacGregorEnum,&attenuation_rate_macgregor[0],P1Enum);
+		element->AddInput2(RadarAttenuationWolffEnum,&attenuation_rate_wolff[0],P1Enum);
 
 		/*Clean up*/
@@ -213,9 +212,9 @@
 	/* Get node coordinates*/
 	element->GetVerticesCoordinates(&xyz_list);
-	Input* atten_input_M07=element->GetInput(RadarAttenuationMacGregorEnum); _assert_(atten_input_M07);
-	Input* atten_input_W97=element->GetInput(RadarAttenuationWolffEnum); _assert_(atten_input_W97);
-	Input* surf_input=element->GetInput(SurfaceEnum); _assert_(surf_input);
-	Input* thick_input=element->GetInput(ThicknessEnum); _assert_(thick_input);
-	Input* temp_input=element->GetInput(TemperatureEnum); _assert_(temp_input);
+	Input2 *atten_input_M07 = element->GetInput2(RadarAttenuationMacGregorEnum); _assert_(atten_input_M07);
+	Input2 *atten_input_W97 = element->GetInput2(RadarAttenuationWolffEnum);     _assert_(atten_input_W97);
+	Input2 *surf_input      = element->GetInput2(SurfaceEnum);                   _assert_(surf_input);
+	Input2 *thick_input     = element->GetInput2(ThicknessEnum);                 _assert_(thick_input);
+	Input2 *temp_input      = element->GetInput2(TemperatureEnum);               _assert_(temp_input);
 
 	/* Start looping on the number of vertices: */
@@ -270,6 +269,6 @@
 
 	    /*Add power results into inputs*/
-			element->AddInput(new PentaInput(RadarPowerMacGregorEnum,&power_M07[0],P1Enum));
-			element->AddInput(new PentaInput(RadarPowerWolffEnum,&power_W97[0],P1Enum));
+			element->AddInput2(RadarPowerMacGregorEnum,&power_M07[0],P1Enum);
+			element->AddInput2(RadarPowerWolffEnum,&power_W97[0],P1Enum);
 
 		/*Clean up and return*/
Index: /issm/trunk/src/c/classes/Vertices.cpp
===================================================================
--- /issm/trunk/src/c/classes/Vertices.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/Vertices.cpp	(revision 24686)
@@ -171,5 +171,5 @@
 /*}}}*/
 
-void Vertices::Finalize(){/*{{{*/
+void Vertices::Finalize(IoModel* iomodel){/*{{{*/
 
 	/*Here we do 3 things:
@@ -195,12 +195,23 @@
 
 	/*2. Distribute lids (First: masters, then clones)*/
+	iomodel->my_vertices_lids=xNew<int>(this->numberofvertices);
+	for(int i=0;i<this->numberofvertices;i++) iomodel->my_vertices_lids[i] = -1;
+
 	int lid = 0;
 	for(int i=0;i<this->Size();i++){
 		Vertex* vertex=xDynamicCast<Vertex*>(this->GetObjectByOffset(i));
-		if(!vertex->clone) vertex->lid=lid++;
-	}
-	for(int i=0;i<this->Size();i++){
-		Vertex* vertex=xDynamicCast<Vertex*>(this->GetObjectByOffset(i));
-		if(vertex->clone) vertex->lid=lid++;
+		if(!vertex->clone){
+			vertex->lid=lid;
+			iomodel->my_vertices_lids[vertex->sid] = lid;
+			lid++;
+		}
+	}
+	for(int i=0;i<this->Size();i++){
+		Vertex* vertex=xDynamicCast<Vertex*>(this->GetObjectByOffset(i));
+		if(vertex->clone){
+			vertex->lid=lid;
+			iomodel->my_vertices_lids[vertex->sid] = lid;
+			lid++;
+		}
 	}
 
Index: /issm/trunk/src/c/classes/Vertices.h
===================================================================
--- /issm/trunk/src/c/classes/Vertices.h	(revision 24685)
+++ /issm/trunk/src/c/classes/Vertices.h	(revision 24686)
@@ -5,4 +5,5 @@
 #include "../datastructures/datastructures.h"
 #include "../shared/shared.h"
+class IoModel;
 
 /*!\brief Declaration of Vertices class.
@@ -33,5 +34,5 @@
 
 		/*numerics:*/
-		void  Finalize(void);
+		void  Finalize(IoModel* iomodel);
 		int   NumberOfVertices(void);
 		int   NumberOfVerticesLocal(void);
Index: /issm/trunk/src/c/classes/classes.h
===================================================================
--- /issm/trunk/src/c/classes/classes.h	(revision 24685)
+++ /issm/trunk/src/c/classes/classes.h	(revision 24686)
@@ -66,18 +66,7 @@
 #include "./Options/OptionUtilities.h"
 
-/*Inputs: */
-#include "./Inputs/Inputs.h"
-#include "./Inputs/Input.h"
-#include "./Inputs/BoolInput.h"
-#include "./Inputs/DoubleInput.h"
-#include "./Inputs/DoubleArrayInput.h"
-#include "./Inputs/IntInput.h"
-#include "./Inputs/TetraInput.h"
-#include "./Inputs/PentaInput.h"
-#include "./Inputs/TriaInput.h"
-#include "./Inputs/SegInput.h"
-#include "./Inputs/ControlInput.h"
-#include "./Inputs/DatasetInput.h"
-#include "./Inputs/TransientInput.h"
+/*Inputs2*/
+#include "./Inputs2/Inputs2.h"
+#include "./Inputs2/Input2.h"
 
 /*ExternalResults: */
Index: /issm/trunk/src/c/classes/kriging/Observations.cpp
===================================================================
--- /issm/trunk/src/c/classes/kriging/Observations.cpp	(revision 24685)
+++ /issm/trunk/src/c/classes/kriging/Observations.cpp	(revision 24686)
@@ -70,5 +70,5 @@
 			break;
 		default:
-			_error_("Tree type "<<this->treetype<<" not supported yet (1: quadtree, 2: covertree)");
+			_printf_("Tree type "<<this->treetype<<" not supported yet (1: quadtree, 2: covertree)");
 	}
 	return;
Index: /issm/trunk/src/c/cores/control_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/control_core.cpp	(revision 24685)
+++ /issm/trunk/src/c/cores/control_core.cpp	(revision 24686)
@@ -143,5 +143,4 @@
 	/*Update control input*/
 	SetControlInputsFromVectorx(femmodel,X);
-	
 	
 	/*solve forward: */
Index: /issm/trunk/src/c/cores/cores.h
===================================================================
--- /issm/trunk/src/c/cores/cores.h	(revision 24685)
+++ /issm/trunk/src/c/cores/cores.h	(revision 24686)
@@ -61,4 +61,6 @@
 void sealevelrise_diagnostics(FemModel* femmodel,Vector<IssmDouble>* RSLg);
 IssmDouble objectivefunction(IssmDouble search_scalar,FemModel* femmodel);
+void GetStericRate(Vector<IssmDouble> ** psteric_rate_g, FemModel* femmodel);
+void GetDynamicRate(Vector<IssmDouble> ** pdynamic_rate_g, FemModel* femmodel);
 
 //optimization
Index: /issm/trunk/src/c/cores/dakota_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/dakota_core.cpp	(revision 24685)
+++ /issm/trunk/src/c/cores/dakota_core.cpp	(revision 24686)
@@ -1,48 +1,48 @@
 /*!\file:  dakota_core.cpp
- * \brief: wrapper to the Dakota capabilities. qmu fires up Dakota, and registers a Dakota Pluggin
- * which will be in charge of running the solution sequences repeteadly, to garner statistics. 
- *
- * This routine deals with running ISSM and Dakota in library mode. In library mode, Dakota does not 
- * run as an execuatble. Its capabilities are linked into the ISSM software. ISSM calls dakota routines 
- * directly from the dakota library. qmu.cpp is the code that is in charge of calling those routines. 
- *
- * Dakota has its own way of running in parallel (for embarassingly parallel jobs). We do not want that, 
- * as ISSM knows exactly how to run "really parallel" jobs that use all CPUS. To bypass Dakota's parallelism, 
+ * \brief: wrapper to the Dakota capabilities. qmu fires up Dakota, and registers a Dakota Plugin
+ * which will be in charge of running the solution sequences repeatedly, to garner statistics.
+ *
+ * This routine deals with running ISSM and Dakota in library mode. In library mode, Dakota does not
+ * run as an executable. Its capabilities are linked into the ISSM software. ISSM calls Dakota routines
+ * directly from the Dakota library. qmu.cpp is the code that is in charge of calling those routines.
+ *
+ * Dakota has its own way of running in parallel (for embarrassingly parallel jobs). We do not want that,
+ * as ISSM knows exactly how to run "really parallel" jobs that use all CPUS. To bypass Dakota's parallelism,
  * we overloaded the constructor for the parallel library (see the Dakota patch in the externalpackages/dakota
- * directory). This overloaded constructor fires up Dakota serially on CPU 0 only! We take care of broadcasting 
- * to the other CPUS, hence ISSM is running in parallel, and Dakota serially on CPU0. 
- *
- * Now, how does CPU 0 drive all other CPUS to carry out sensitivity analysese? By synchronizing its call to 
- * our ISSM cores (stressbalance_core, thermal_core, transient_core, etc ...) on CPU 0 with all other CPUS. 
- * This explains the structure of qmu.cpp, where cpu 0 runs Dakota, the Dakota pluggin fires up DakotaSpawnCore.cpp, 
- * while the other CPUS are waiting for a broadcast from CPU0, once they get it, they also fire up 
- * DakotaSpawnCore. In the end, DakotaSpawnCore is fired up on all CPUS, with CPU0 having Dakota inputs, that it will 
- * broacast to other CPUS. 
- *
- * Now, how does dakota call the DakotaSpawnCore routine? The DakotaSpawnCore is embedded into the DakotaPlugin object 
- * which is derived from the Direct Interface Dakota objct. This is the only way to run Dakota in library 
- * mode (see their developper guide for more info). Dakota registers the DakotaPlugin object into its own 
- * database, and calls on the embedded DakotaSpawnCore from CPU0. 
- *
- */ 
+ * directory). This overloaded constructor fires up Dakota serially on CPU 0 only! We take care of broadcasting
+ * to the other CPUS, hence ISSM is running in parallel, and Dakota serially on CPU0.
+ *
+ * Now, how does CPU 0 drive all other CPUS to carry out sensitivity analyses? By synchronizing its call to
+ * our ISSM cores (stressbalance_core, thermal_core, transient_core, etc ...) on CPU 0 with all other CPUS.
+ * This explains the structure of qmu.cpp, where CPU 0 runs Dakota, the Dakota plugin fires up DakotaSpawnCore.cpp,
+ * while the other CPUS are waiting for a broadcast from CPU0, once they get it, they also fire up
+ * DakotaSpawnCore. In the end, DakotaSpawnCore is fired up on all CPUS, with CPU0 having Dakota inputs, that it will
+ * broadcast to other CPUS.
+ *
+ * Now, how does Dakota call the DakotaSpawnCore routine? The DakotaSpawnCore is embedded into the DakotaPlugin object
+ * which is derived from the Direct Interface Dakota object. This is the only way to run Dakota in library
+ * mode (see their developer guide for more info). Dakota registers the DakotaPlugin object into its own
+ * database, and calls on the embedded DakotaSpawnCore from CPU0.
+ *
+ */
 
  /* \brief: run core ISSM solution using Dakota inputs coming from CPU 0.
  * \sa qmu.cpp DakotaPlugin.cpp
  *
- * This routine needs to be understood simultaneously with qmu.cpp and DakotaPlugin. 
- * DakotaSpawnCoreParallel is called by all CPUS, with CPU 0 holding Dakota variable values, along 
- * with variable descriptors. 
- *
- * DakotaSpawnCoreParallel takes care of broadcasting the variables and their descriptors across the MPI 
- * ring. Once this is done, we use the variables to modify the inputs for the solution core. 
- * For ex, if "rho_ice" is provided, for ex 920, we include "rho_ice" in the inputs, then 
- * call the core with the modified inputs. This is the way we get Dakota to explore the parameter 
- * spce of the core. 
- *
- * Once the core is called, we process the results of the core, and using the processed results, 
- * we compute response functions. The responses are computed on all CPUS, but they are targeted 
- * for CPU 0, which will get these values back to the Dakota engine. 
- *
- */ 
+ * This routine needs to be understood simultaneously with qmu.cpp and DakotaPlugin.
+ * DakotaSpawnCoreParallel is called by all CPUS, with CPU 0 holding Dakota variable values, along
+ * with variable descriptors.
+ *
+ * DakotaSpawnCoreParallel takes care of broadcasting the variables and their descriptors across the MPI
+ * ring. Once this is done, we use the variables to modify the inputs for the solution core.
+ * For ex, if "rho_ice" is provided, for ex 920, we include "rho_ice" in the inputs, then
+ * call the core with the modified inputs. This is the way we get Dakota to explore the parameter
+ * space of the core.
+ *
+ * Once the core is called, we process the results of the core, and using the processed results,
+ * we compute response functions. The responses are computed on all CPUS, but they are targeted
+ * for CPU 0, which will get these values back to the Dakota engine.
+ *
+ */
 
 /*include config: {{{*/
@@ -80,5 +80,5 @@
 void DakotaFree(double** pvariables,char*** pvariables_descriptors,char*** presponses_descriptors,int numvariables,int numresponses){ /*{{{*/
 
-	/*\brief DakotaFree: free allocations on other cpus, not done by Dakota.*/
+	/*\brief DakotaFree: free allocations on other CPUs, not done by Dakota.*/
 
 	int i;
@@ -98,5 +98,5 @@
 	my_rank=IssmComm::GetRank();
 
-	/*Free variables and variables_descriptors only on cpu !=0*/
+	/*Free variables and variables_descriptors only on CPU !=0*/
 	if(my_rank!=0){
 		xDelete<double>(variables);
@@ -108,5 +108,5 @@
 	}
 
-	//responses descriptors on every cpu
+	//responses descriptors on every CPU
 	for(i=0;i<numresponses;i++){
 		string=responses_descriptors[i];
@@ -124,6 +124,6 @@
 
 	/* * \brief: broadcast variables_descriptors, variables, numvariables and numresponses
-	 * from cpu 0 to all other cpus.
-	 */ 
+	 * from CPU 0 to all other CPUs.
+	 */
 
 	int i;
@@ -150,9 +150,9 @@
 
 	/*numvariables: */
-	ISSM_MPI_Bcast(&numvariables,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
+	ISSM_MPI_Bcast(&numvariables,1,ISSM_MPI_INT,0,IssmComm::GetComm());
 
 	/*variables:*/
 	if(my_rank!=0)variables=xNew<double>(numvariables);
-	ISSM_MPI_Bcast(variables,numvariables,MPI_DOUBLE,0,IssmComm::GetComm()); 
+	ISSM_MPI_Bcast(variables,numvariables,MPI_DOUBLE,0,IssmComm::GetComm());
 
 	/*variables_descriptors: */
@@ -165,12 +165,12 @@
 			string_length=(strlen(string)+1)*sizeof(char);
 		}
-		ISSM_MPI_Bcast(&string_length,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
+		ISSM_MPI_Bcast(&string_length,1,ISSM_MPI_INT,0,IssmComm::GetComm());
 		if(my_rank!=0)string=xNew<char>(string_length);
-		ISSM_MPI_Bcast(string,string_length,ISSM_MPI_CHAR,0,IssmComm::GetComm()); 
+		ISSM_MPI_Bcast(string,string_length,ISSM_MPI_CHAR,0,IssmComm::GetComm());
 		if(my_rank!=0)variables_descriptors[i]=string;
 	}
 
 	/*numresponses: */
-	ISSM_MPI_Bcast(&numresponses,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
+	ISSM_MPI_Bcast(&numresponses,1,ISSM_MPI_INT,0,IssmComm::GetComm());
 
 	/*Assign output pointers:*/
@@ -182,5 +182,5 @@
 int  DakotaSpawnCore(double* d_responses, int d_numresponses, double* d_variables, char** d_variables_descriptors,int d_numvariables, void* void_femmodel,int counter){ /*{{{*/
 
-	/*Notice the d_, which prefixes anything that is being provided to us by the Dakota pluggin. Careful. some things are ours, some are dakotas!: */
+	/*Notice the d_, which prefixes anything that is being provided to us by the Dakota plugin. Careful: some things are ours; some are Dakota's! */
 
 	char     **responses_descriptors    = NULL;      //these are our! there are only numresponsedescriptors of them, not d_numresponses!!!
@@ -192,10 +192,10 @@
 	bool       nodakotacore             = true;
 
-	/*If counter==-1 on cpu0, it means that the dakota runs are done. In which case, bail out and return 0: */
-	ISSM_MPI_Bcast(&counter,1,ISSM_MPI_INT,0,IssmComm::GetComm()); 
+	/*If counter==-1 on CPU 0, it means that the Dakota runs are done. In which case, bail out and return 0: */
+	ISSM_MPI_Bcast(&counter,1,ISSM_MPI_INT,0,IssmComm::GetComm());
 	if(counter==-1)return 0;
 
-	/*cast void_femmodel to FemModel, and at the same time, make a copy, so we start this new core run for this specific sample 
-	 *with a brand new copy of the model, which has not been tempered with by previous dakota runs: */
+	/*cast void_femmodel to FemModel, and at the same time, make a copy, so we start this new core run for this specific sample
+	 *with a brand new copy of the model, which has not been tempered with by previous Dakota runs: */
 	femmodel=(reinterpret_cast<FemModel*>(void_femmodel))->copy();
 
@@ -207,5 +207,5 @@
 	if(VerboseQmu()) _printf0_("qmu iteration: " << counter << "\n");
 
-	/* only cpu 0, running dakota is providing us with variables and variables_descriptors and numresponses: broadcast onto other cpus: */
+	/* only CPU 0, running Dakota is providing us with variables and variables_descriptors and numresponses: broadcast onto other CPUs: */
 	DakotaMPI_Bcast(&d_variables,&d_variables_descriptors,&d_numvariables,&d_numresponses);
 
@@ -231,5 +231,5 @@
 
 	return 1; //this is critical! do not return 0, otherwise, dakota_core will stop running!
-} 
+}
 /*}}}*/
 void dakota_core(FemModel* femmodel){  /*{{{*/
@@ -255,6 +255,6 @@
 		char* dakotamode=xNew<char>(strlen("serial")+1);
 		xMemCpy<char>(dakotamode,"serial",strlen("serial")+1);
-		Dakota::ParallelLibrary parallel_lib(dakotamode); //use our own ISSM Dakota library mode constructor, which only fires up Dakota on CPU 0. 
-		Dakota::ProblemDescDB problem_db(parallel_lib); 
+		Dakota::ParallelLibrary parallel_lib(dakotamode); //use our own ISSM Dakota library mode constructor, which only fires up Dakota on CPU 0.
+		Dakota::ProblemDescDB problem_db(parallel_lib);
 		xDelete<char>(dakotamode);
 
@@ -289,5 +289,5 @@
 		selected_strategy.run_strategy();
 
-		//Warn other cpus that we are done running the dakota iterator, by setting the counter to -1:
+		//Warn other CPUs that we are done running the Dakota iterator, by setting the counter to -1:
 		DakotaSpawnCore(NULL,0, NULL,NULL,0,femmodel,-1);
 
@@ -296,9 +296,9 @@
 
 		for(;;){
-			if(!DakotaSpawnCore(NULL,0, NULL,NULL,0,femmodel,0))break; //counter came in at -1 on cpu0, bail out.
+			if(!DakotaSpawnCore(NULL,0, NULL,NULL,0,femmodel,0)) break; //counter came in at -1 on CPU 0, bail out.
 		}
 	}
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	xDelete<char>(dakota_input_file);
 	xDelete<char>(dakota_error_file);
@@ -307,6 +307,6 @@
 } /*}}}*/
 #else
-void dakota_core(FemModel* femmodel){ 
-	_error_("dakota_core for versions of Dakota >=6 should not be used anymore! Use instead the issm_dakota  executable!");
+void dakota_core(FemModel* femmodel){
+	_error_("dakota_core for versions of Dakota >=6 should not be used anymore! Use instead the issm_dakota executable!");
 }
 #endif
Index: /issm/trunk/src/c/cores/hydrology_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/hydrology_core.cpp	(revision 24685)
+++ /issm/trunk/src/c/cores/hydrology_core.cpp	(revision 24686)
@@ -97,5 +97,5 @@
 					averagedinput.assign(averagelist,averagelist+2);
 				}
-				femmodel->InitTransientOutputx(&transientinput[0],numaveragedinput);
+				femmodel->InitTransientInputx(&transientinput[0],numaveragedinput);
 				while(substep<dtslices){ //loop on hydro dts
 					substep+=1;
@@ -114,8 +114,8 @@
 					solutionsequence_hydro_nonlinear(femmodel);
                /*If we have a sub-timestep we store the substep inputs in a transient input here*/
-					femmodel->StackTransientOutputx(&substepinput[0],&transientinput[0],subtime,numaveragedinput);
+					femmodel->StackTransientInputx(&substepinput[0],&transientinput[0],subtime,numaveragedinput);
 				}
 				/*averaging the stack*/
-				femmodel->AverageTransientOutputx(&transientinput[0],&averagedinput[0],global_time-dt,subtime,numaveragedinput);
+				femmodel->AverageTransientInputx(&transientinput[0],&averagedinput[0],global_time-dt,subtime,numaveragedinput);
 
 				/*And reseting to global time*/
@@ -178,4 +178,5 @@
 		if(VerboseSolution()) _printf0_("   updating water column\n");
 		HydrologyPismAnalysis* analysis = new HydrologyPismAnalysis();
+		InputDuplicatex(femmodel,WatercolumnEnum,WaterColumnOldEnum);
 		analysis->UpdateWaterColumn(femmodel);
 		delete analysis;
Index: /issm/trunk/src/c/cores/masstransport_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/masstransport_core.cpp	(revision 24685)
+++ /issm/trunk/src/c/cores/masstransport_core.cpp	(revision 24686)
@@ -62,4 +62,7 @@
 		femmodel->SetCurrentConfiguration(MasstransportAnalysisEnum);
 		InputDuplicatex(femmodel,ThicknessEnum,ThicknessOldEnum);
+		InputDuplicatex(femmodel,BaseEnum,BaseOldEnum);
+		InputDuplicatex(femmodel,SurfaceEnum,SurfaceOldEnum);
+		InputDuplicatex(femmodel,SealevelriseCumDeltathicknessEnum,SealevelriseCumDeltathicknessOldEnum);
 		if(stabilization==4){
 			solutionsequence_fct(femmodel);
@@ -67,4 +70,9 @@
 		else{
 			solutionsequence_linear(femmodel);
+			// ThicknessAverage: method not totally tested
+			//if(stabilization==3){
+			//	if(VerboseSolution()) _printf0_("   call thickness average core\n");
+			//	femmodel->ThicknessAverage();
+			//}
 		}
 		femmodel->parameters->SetParam(ThicknessEnum,InputToExtrudeEnum);
Index: /issm/trunk/src/c/cores/sealevelrise_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/sealevelrise_core.cpp	(revision 24685)
+++ /issm/trunk/src/c/cores/sealevelrise_core.cpp	(revision 24686)
@@ -6,7 +6,11 @@
 #include "../toolkits/toolkits.h"
 #include "../classes/classes.h"
+#include "../classes/Inputs2/TriaInput2.h"
+#include "../classes/Inputs2/TransientInput2.h"
+#include "../classes/Inputs2/DatasetInput2.h"
 #include "../shared/shared.h"
 #include "../modules/modules.h"
 #include "../solutionsequences/solutionsequences.h"
+
 
 /*cores:*/
@@ -192,5 +196,5 @@
 
 		//reset cumdeltathickness  to 0: 
-		InputUpdateFromConstantx(femmodel->elements,0.,SealevelriseCumDeltathicknessEnum);
+		InputUpdateFromConstantx(femmodel->inputs2,femmodel->elements,0.,SealevelriseCumDeltathicknessEnum);
 	}
 
@@ -225,4 +229,5 @@
 	Vector<IssmDouble> *SL  = NULL; 
 	Vector<IssmDouble> *steric_rate_g  = NULL; 
+	Vector<IssmDouble> *dynamic_rate_g = NULL;
 	Vector<IssmDouble> *hydro_rate_g  = NULL; 
 	Vector<IssmDouble> *U_esa_rate= NULL;
@@ -255,5 +260,6 @@
 	GetVectorFromInputsx(&bedrock,femmodel,BedEnum,VertexSIdEnum);
 	GetVectorFromInputsx(&SL,femmodel,SealevelEnum,VertexSIdEnum);
-	GetVectorFromInputsx(&steric_rate_g,femmodel,SealevelriseStericRateEnum,VertexSIdEnum);
+	GetStericRate(&steric_rate_g,femmodel);
+	GetDynamicRate(&dynamic_rate_g,femmodel);
 	GetVectorFromInputsx(&hydro_rate_g,femmodel,SealevelriseHydroRateEnum,VertexSIdEnum);
 	if(geodetic){
@@ -264,5 +270,5 @@
 	}
 
-	/*compute: sea level change = initial sea level + (N_gia_rate+N_esa_rate)  * dt + steric_rate + hydro_rate* dt*/
+	/*compute: sea level change = initial sea level + (N_gia_rate + N_esa_rate + steric_rate + dynamic_rate + hydro_rate) * dt*/
 	if(geodetic){
 		SL->AXPY(N_gia_rate,dt);
@@ -270,4 +276,5 @@
 	}
 	SL->AXPY(steric_rate_g,dt);
+	SL->AXPY(dynamic_rate_g,dt);
 	SL->AXPY(hydro_rate_g,dt);
 
@@ -286,4 +293,5 @@
 	delete SL;
 	delete steric_rate_g;
+	delete dynamic_rate_g;
 	delete hydro_rate_g;
 	if(geodetic){
@@ -558,4 +566,90 @@
 }
 /*}}}*/
+void GetDynamicRate(Vector<IssmDouble> ** pdynamic_rate_g, FemModel* femmodel){ /*{{{*/
+
+	int dslmodel=-1;
+	IssmDouble time;
+
+	/*variables:*/
+	Vector<IssmDouble> *dynamic_rate_g  = NULL; 
+
+	/*Update dynamic rates before retrieving them on Vertex SID set:*/
+	femmodel->parameters->FindParam(&dslmodel,DslModelEnum);
+	femmodel->parameters->FindParam(&time,TimeEnum);
+	if(dslmodel==1){
+		TransientInput2* transient_input  = femmodel->inputs2->GetTransientInput(DslSeaSurfaceHeightChangeAboveGeoidEnum);
+		TriaInput2* tria_input=transient_input->GetTriaInput(time);
+		Input2* tria_input_copy=tria_input->copy();
+		tria_input_copy->ChangeEnum(DslDynamicRateEnum);
+		femmodel->inputs2->AddInput(tria_input_copy);
+	}
+	else if(dslmodel==2){
+	
+		int modelid;
+		
+		/*Recover modelid:*/
+		femmodel->parameters->FindParam(&modelid,DslModelidEnum);
+		modelid--; //from matlab. 
+		
+		/*find the DslSeaSurfaceHeightChangeAboveGeoidEnum dataset of transient inputs:*/
+		DatasetInput2* dataset_input=femmodel->inputs2->GetDatasetInput2(DslSeaSurfaceHeightChangeAboveGeoidEnum);
+		
+		/*Go find the modelid'th transient input:*/
+		TriaInput2* tria_input=dataset_input->GetTriaInputByOffset(modelid);
+		
+		/*Plug into DslDynamicRate input: */
+		Input2* tria_input_copy=tria_input->copy();
+		tria_input_copy->ChangeEnum(DslDynamicRateEnum);
+		femmodel->inputs2->AddInput(tria_input_copy);
+	}
+	else _error_("not implemented yet");
+
+	GetVectorFromInputsx(&dynamic_rate_g,femmodel,DslDynamicRateEnum,VertexSIdEnum);
+	*pdynamic_rate_g=dynamic_rate_g;
+}
+/*}}}*/
+void GetStericRate(Vector<IssmDouble> ** psteric_rate_g, FemModel* femmodel){ /*{{{*/
+
+	int dslmodel=-1;
+	IssmDouble time;
+
+	/*variables:*/
+	Vector<IssmDouble> *steric_rate_g  = NULL; 
+
+	/*Update steric rates before retrieving them on Vertex SID set:*/
+	femmodel->parameters->FindParam(&dslmodel,DslModelEnum);
+	femmodel->parameters->FindParam(&time,TimeEnum);
+	if(dslmodel==1){
+		TransientInput2* transient_input  = femmodel->inputs2->GetTransientInput(DslGlobalAverageThermostericSeaLevelChangeEnum);
+		TriaInput2* tria_input=transient_input->GetTriaInput(time);
+		Input2* tria_input_copy=tria_input->copy();
+		tria_input_copy->ChangeEnum(DslStericRateEnum);
+		femmodel->inputs2->AddInput(tria_input_copy);
+	}
+	else if (dslmodel==2){
+		int modelid;
+		
+		/*Recover modelid:*/
+		femmodel->parameters->FindParam(&modelid,DslModelidEnum);
+		
+		modelid--; //from matlab. 
+		
+		/*find the DslGlobalAverageThermostericSeaLevelChangeEnum dataset of transient inputs:*/
+		DatasetInput2* dataset_input=femmodel->inputs2->GetDatasetInput2(DslGlobalAverageThermostericSeaLevelChangeEnum);
+		
+		/*Go find the modelid'th transient input:*/
+		TriaInput2* tria_input=dataset_input->GetTriaInputByOffset(modelid);
+		
+		/*Plug into DslStericRate input: */
+		Input2* tria_input_copy=tria_input->copy();
+		tria_input_copy->ChangeEnum(DslStericRateEnum);
+		femmodel->inputs2->AddInput(tria_input_copy);
+	}
+	else _error_("not implemented yet");
+
+	GetVectorFromInputsx(&steric_rate_g,femmodel,DslStericRateEnum,VertexSIdEnum);
+	*psteric_rate_g=steric_rate_g;
+}
+/*}}}*/
 
 /*support routines:*/
Index: /issm/trunk/src/c/cores/smb_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/smb_core.cpp	(revision 24685)
+++ /issm/trunk/src/c/cores/smb_core.cpp	(revision 24686)
@@ -37,5 +37,5 @@
 	int numaveragedinput;
 	femmodel->parameters->FindParam(&dtslices,SmbStepsPerStepEnum);
-	/*intermiedaries to deal with averaging*/
+	/*intermediaries to deal with averaging*/
 	static const int substeplist[2] = {SmbMassBalanceSubstepEnum,SmbRunoffSubstepEnum};
 	static const int transientlist[2] = {SmbMassBalanceTransientEnum,SmbRunoffTransientEnum};
@@ -53,5 +53,5 @@
 	}
 
-	/*if yes compute necessary intermiedaries and start looping*/
+	/*if yes compute necessary intermediaries and start looping*/
 	if (dtslices>1){
 		int        substep;
@@ -68,5 +68,6 @@
 		femmodel->parameters->SetParam(subdt,TimesteppingTimeStepEnum);
 
-		femmodel->InitTransientOutputx(&transientinput[0],numaveragedinput);
+		femmodel->InitTransientInputx(&transientinput[0],numaveragedinput);
+		analysis = new SmbAnalysis();
 		while(substep<dtslices){ //loop on sub dts
 			substep+=1;
@@ -75,13 +76,12 @@
          if(VerboseSolution()) _printf0_("sub iteration " << substep << "/" << dtslices << "  time [yr]: " << setprecision(4) << subtime/yts << " (time step: " << subdt/yts << ")\n");
          if(VerboseSolution()) _printf0_("   computing smb\n");
-         analysis = new SmbAnalysis();
 			if(VerboseSolution()) _printf0_("   Calling core\n");
 			analysis->Core(femmodel);
          /*If we have a sub-timestep we store the substep inputs in a transient input here*/
-         femmodel->StackTransientOutputx(&substepinput[0],&transientinput[0],subtime,numaveragedinput);
-			delete analysis;
+         femmodel->StackTransientInputx(&substepinput[0],&transientinput[0],subtime,numaveragedinput);
 		}
+		delete analysis;
       /*averaging the transient input*/
-		femmodel->AverageTransientOutputx(&transientinput[0],&averagedinput[0],global_time-dt,subtime,numaveragedinput);
+		femmodel->AverageTransientInputx(&transientinput[0],&averagedinput[0],global_time-dt,subtime,numaveragedinput);
 		/*and reset timesteping variables to original*/
 		femmodel->parameters->SetParam(global_time,TimeEnum);
Index: /issm/trunk/src/c/cores/thermal_core.cpp
===================================================================
--- /issm/trunk/src/c/cores/thermal_core.cpp	(revision 24685)
+++ /issm/trunk/src/c/cores/thermal_core.cpp	(revision 24686)
@@ -14,5 +14,5 @@
 
 	/*Start profiler*/
-        femmodel->profiler->Start(THERMALCORE);
+	femmodel->profiler->Start(THERMALCORE);
 
 	/*intermediary*/
@@ -34,4 +34,5 @@
 
 	if(isenthalpy){
+		femmodel->InputMakeDiscontinuous(BasalforcingsGroundediceMeltingRateEnum);
 		enthalpy_analysis = new EnthalpyAnalysis();
 		enthalpy_analysis->Core(femmodel);
Index: /issm/trunk/src/c/datastructures/DataSet.cpp
===================================================================
--- /issm/trunk/src/c/datastructures/DataSet.cpp	(revision 24685)
+++ /issm/trunk/src/c/datastructures/DataSet.cpp	(revision 24686)
@@ -215,58 +215,4 @@
 				this->AddObject(seg);
 			}
-			else if(obj_enum==BoolInputEnum){
-				BoolInput* boolinput=NULL;
-				boolinput=new BoolInput();
-				boolinput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(boolinput);
-			}
-			else if(obj_enum==DoubleInputEnum){
-				DoubleInput* doubleinput=NULL;
-				doubleinput=new DoubleInput();
-				doubleinput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(doubleinput);
-			}
-			else if(obj_enum==IntInputEnum){
-				IntInput* intinput=NULL;
-				intinput=new IntInput();
-				intinput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(intinput);
-			}
-			else if(obj_enum==ControlInputEnum){
-				ControlInput* cinput=NULL;
-				cinput=new ControlInput();
-				cinput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(cinput);
-			}
-			else if(obj_enum==TransientInputEnum){
-				TransientInput* transinput=NULL;
-				transinput=new TransientInput();
-				transinput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(transinput);
-			}
-			else if(obj_enum==TriaInputEnum){
-				TriaInput* triainput=NULL;
-				triainput=new TriaInput();
-				triainput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(triainput);
-			}
-			else if(obj_enum==PentaInputEnum){
-				PentaInput* pentainput=NULL;
-				pentainput=new PentaInput();
-				pentainput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(pentainput);
-			}
-			else if(obj_enum==TetraInputEnum){
-				TetraInput* tetrainput=NULL;
-				tetrainput=new TetraInput();
-				tetrainput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(tetrainput);
-			}
-			else if(obj_enum==SegInputEnum){
-				SegInput* seginput=NULL;
-				seginput=new SegInput();
-				seginput->Marshall(pmarshalled_data,pmarshalled_data_size,marshall_direction);
-				this->AddObject(seginput);
-			}
 			else if(obj_enum==RiftfrontEnum){
 				Riftfront* rift=NULL;
Index: /issm/trunk/src/c/main/esmfbinders.cpp
===================================================================
--- /issm/trunk/src/c/main/esmfbinders.cpp	(revision 24685)
+++ /issm/trunk/src/c/main/esmfbinders.cpp	(revision 24686)
@@ -114,5 +114,5 @@
 
 						/*Recover surface from the ISSM element: */
-						Input* surface_input = element->GetInput(SurfaceEnum); _assert_(surface_input);
+						Input2* surface_input = element->GetInput2(SurfaceEnum); _assert_(surface_input);
 						surface_input->GetInputAverage(&surface);
 
Index: /issm/trunk/src/c/modules/ConfigureObjectsx/ConfigureObjectsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/ConfigureObjectsx/ConfigureObjectsx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ConfigureObjectsx/ConfigureObjectsx.cpp	(revision 24686)
@@ -9,32 +9,28 @@
 #include "../../classes/classes.h"
 
-int	ConfigureObjectsx( Elements* elements, Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters){
-
-	/*Intermediary*/
-	int       i;
-	int       noerr = 1;
-	int       configuration_type;
-	Element  *element            = NULL;
-	Load     *load               = NULL;
-	Material *material           = NULL;
+int	ConfigureObjectsx( Elements* elements, Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials,Parameters* parameters,Inputs2* inputs2){
 
 	/*Get analysis type: */
+	int configuration_type;
 	parameters->FindParam(&configuration_type,ConfigurationTypeEnum);
 
 	if(VerboseMProcessor()) _printf0_("      Configuring elements...\n");
-	for(i=0;i<elements->Size();i++){
-		element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-		element->Configure(elements,loads,nodes,vertices,materials,parameters);
+	for(int i=0;i<elements->Size();i++){
+		Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
+		element->Configure(elements,loads,nodes,vertices,materials,parameters,inputs2);
 	}
 	if(VerboseMProcessor()) _printf0_("      Configuring loads...\n");
-	for(i=0;i<loads->Size();i++){
-		load=(Load*)loads->GetObjectByOffset(i);
+	for(int i=0;i<loads->Size();i++){
+		Load* load=(Load*)loads->GetObjectByOffset(i);
 		load->Configure(elements,loads,nodes,vertices,materials,parameters);
 	}
 	if(VerboseMProcessor()) _printf0_("      Configuring materials...\n");
-	for(i=0;i<materials->Size();i++){
-		material=(Material*)materials->GetObjectByOffset(i);
+	for(int i=0;i<materials->Size();i++){
+		Material* material=(Material*)materials->GetObjectByOffset(i);
 		material->Configure(elements);
 	}
-	return noerr;
+	if(VerboseMProcessor()) _printf0_("      Configuring inputs...\n");
+	inputs2->Configure(parameters);
+
+	return 1;
 }
Index: /issm/trunk/src/c/modules/ConfigureObjectsx/ConfigureObjectsx.h
===================================================================
--- /issm/trunk/src/c/modules/ConfigureObjectsx/ConfigureObjectsx.h	(revision 24685)
+++ /issm/trunk/src/c/modules/ConfigureObjectsx/ConfigureObjectsx.h	(revision 24686)
@@ -9,5 +9,5 @@
 
 /* local prototypes: */
-int ConfigureObjectsx( Elements* elements, Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials, Parameters* parameters);
+int ConfigureObjectsx( Elements* elements, Loads* loads, Nodes* nodes, Vertices* vertices, Materials* materials, Parameters* parameters,Inputs2* inputs2);
 
 #endif  /* _CONFIGUREOBJECTSX_H */
Index: /issm/trunk/src/c/modules/ConstraintsStatex/ConstraintsStatex.cpp
===================================================================
--- /issm/trunk/src/c/modules/ConstraintsStatex/ConstraintsStatex.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ConstraintsStatex/ConstraintsStatex.cpp	(revision 24686)
@@ -10,10 +10,17 @@
 void ConstraintsStatex(int* pconverged, int* pnum_unstable_constraints,FemModel* femmodel){
 
+	/*Early return if no rift and no penalties*/
+	if(femmodel->loads->numrifts == 0 && femmodel->loads->numpenalties == 0){
+		*pconverged                = 0;
+		*pnum_unstable_constraints = 0;
+		return;
+	}
+
 	/*output: */
-	int converged                     = 1;
-	int num_unstable_constraints      = 0;
-	int min_mechanical_constraints    = 0;
-	int  unstable                     = 0;
-	int  sum_num_unstable_constraints = 0;
+	int converged                    = 1;
+	int num_unstable_constraints     = 0;
+	int min_mechanical_constraints   = 0;
+	int unstable                     = 0;
+	int sum_num_unstable_constraints = 0;
 	int analysis_type;
 
@@ -26,17 +33,20 @@
 
 	/*Rift penalties first*/
-	if(RiftIsPresent(femmodel->loads,analysis_type)){
+	if(femmodel->loads->numrifts){
 		RiftConstraintsState(&converged,&num_unstable_constraints,femmodel->loads,min_mechanical_constraints,analysis_type);
 	}
 
 	/*Deal with pengrid*/
-	for(int i=0;i<femmodel->loads->Size();i++){
-		Load* load=(Load*)femmodel->loads->GetObjectByOffset(i);
-		if(load->ObjectEnum()==PengridEnum){
-			Pengrid* pengrid=(Pengrid*)load;
-			pengrid->ConstraintActivate(&unstable);
-			num_unstable_constraints += unstable;
+	if(femmodel->loads->numpenalties){
+		for(int i=0;i<femmodel->loads->Size();i++){
+			Load* load=(Load*)femmodel->loads->GetObjectByOffset(i);
+			if(load->ObjectEnum()==PengridEnum){
+				Pengrid* pengrid=(Pengrid*)load;
+				pengrid->ConstraintActivate(&unstable);
+				num_unstable_constraints += unstable;
+			}
 		}
 	}
+
 	ISSM_MPI_Reduce(&num_unstable_constraints,&sum_num_unstable_constraints,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() );
 	ISSM_MPI_Bcast(&sum_num_unstable_constraints,1,ISSM_MPI_INT,0,IssmComm::GetComm());                
Index: /issm/trunk/src/c/modules/ConstraintsStatex/ConstraintsStatex.h
===================================================================
--- /issm/trunk/src/c/modules/ConstraintsStatex/ConstraintsStatex.h	(revision 24685)
+++ /issm/trunk/src/c/modules/ConstraintsStatex/ConstraintsStatex.h	(revision 24686)
@@ -9,5 +9,4 @@
 
 /* local prototypes: */
-int  RiftIsPresent(Loads* loads,int analysis_type);
 void ConstraintsStatex(int* pconverged, int* pnum_unstable_constraints,FemModel* femmodel);
 
Index: /issm/trunk/src/c/modules/ConstraintsStatex/RiftConstraintsState.cpp
===================================================================
--- /issm/trunk/src/c/modules/ConstraintsStatex/RiftConstraintsState.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ConstraintsStatex/RiftConstraintsState.cpp	(revision 24686)
@@ -2,35 +2,8 @@
  * \brief: manage penalties for rifts 
  */
-
 #include "./ConstraintsStateLocal.h"
 #include "../../shared/shared.h"
 
-#define _ZIGZAGCOUNTER_
-
 /*current module: */
-/*RiftIsPresent(Loads* loads,int configuration_type){{{*/
-int RiftIsPresent(Loads* loads,int configuration_type){
-
-	int i;
-
-	int found=0;
-	int mpi_found=0;
-
-	/*go though loads, and figure out if one of the loads is a Riftfront: */
-	for (i=0;i<loads->Size();i++){
-		Load* load=(Load*)loads->GetObjectByOffset(i);
-		if(RiftfrontEnum==loads->GetEnum(i)){
-			found=1;
-			break;
-		}
-	}
-
-	ISSM_MPI_Reduce (&found,&mpi_found,1,ISSM_MPI_INT,ISSM_MPI_SUM,0,IssmComm::GetComm() );
-	ISSM_MPI_Bcast(&mpi_found,1,ISSM_MPI_INT,0,IssmComm::GetComm());                
-	found=mpi_found;
-
-	return found;
-}
-/*}}}*/
 /*RiftConstraintsState(int* pconverged, int* pnum_unstable_constraints,Loads* loads,int min_mechanical_constraints,int configuration_type){{{*/
 void RiftConstraintsState(int* pconverged, int* pnum_unstable_constraints,Loads* loads,int min_mechanical_constraints,int configuration_type){
Index: /issm/trunk/src/c/modules/Damagex/Damagex.cpp
===================================================================
--- /issm/trunk/src/c/modules/Damagex/Damagex.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/Damagex/Damagex.cpp	(revision 24686)
@@ -4,4 +4,5 @@
 
 #include "./Damagex.h"
+#include "../InputDuplicatex/InputDuplicatex.h"
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
@@ -17,4 +18,6 @@
 		case 0:
 			if(VerboseModule()) _printf0_("   computing damage analytically\n");
+			InputDuplicatex(femmodel,DamageDEnum,DamageDOldEnum);
+			InputDuplicatex(femmodel,DamageDbarEnum,DamageDbarOldEnum);
 			femmodel->ElementOperationx(&Element::ComputeNewDamage);
 			break;
Index: /issm/trunk/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp
===================================================================
--- /issm/trunk/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/DragCoefficientAbsGradientx/DragCoefficientAbsGradientx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void DragCoefficientAbsGradientx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -59,6 +60,6 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input=basalelement->GetInput(InversionCostFunctionsCoefficientsEnum);   _assert_(weights_input);
-	Input* drag_input   =basalelement->GetInput(FrictionCoefficientEnum); _assert_(drag_input);
+	DatasetInput2* weights_input=basalelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum);   _assert_(weights_input);
+	Input2* drag_input   =basalelement->GetInput2(FrictionCoefficientEnum); _assert_(drag_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/modules/FloatingiceMeltingRatePicox/FloatingiceMeltingRatePicox.cpp
===================================================================
--- /issm/trunk/src/c/modules/FloatingiceMeltingRatePicox/FloatingiceMeltingRatePicox.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/FloatingiceMeltingRatePicox/FloatingiceMeltingRatePicox.cpp	(revision 24686)
@@ -4,4 +4,5 @@
 
 #include "./FloatingiceMeltingRatePicox.h"
+#include "../InputDuplicatex/InputDuplicatex.h"
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
@@ -17,5 +18,5 @@
 		int numvertices = element->GetNumberOfVertices();
 		IssmDouble* values = xNewZeroInit<IssmDouble>(numvertices);
-		element->AddInput(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
+		element->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
 		xDelete<IssmDouble>(values);
 	}
@@ -45,8 +46,8 @@
 	IssmDouble* dmax_basin_cpu=xNew<IssmDouble>(num_basins);
 
-	femmodel->elements->InputDuplicate(MaskGroundediceLevelsetEnum,DistanceToGroundinglineEnum);
+	InputDuplicatex(femmodel,MaskGroundediceLevelsetEnum,DistanceToGroundinglineEnum);
 	femmodel->DistanceToFieldValue(MaskGroundediceLevelsetEnum,0.,DistanceToGroundinglineEnum);
 
-	femmodel->elements->InputDuplicate(MaskIceLevelsetEnum,DistanceToCalvingfrontEnum);
+	InputDuplicatex(femmodel,MaskIceLevelsetEnum,DistanceToCalvingfrontEnum);
 	femmodel->DistanceToFieldValue(MaskIceLevelsetEnum,0.,DistanceToCalvingfrontEnum);
 
@@ -60,5 +61,5 @@
 		distances=xNew<IssmDouble>(numvertices);
 		element->GetInputListOnVertices(&distances[0],DistanceToGroundinglineEnum);
-		element->inputs->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
+		element->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
 		for(int k=0; k<numvertices; k++){
 			if(fabs(distances[k])>maxdist_cpu){maxdist_cpu=fabs(distances[k]);}
@@ -116,6 +117,6 @@
 		Element* basalelement = element->SpawnBasalElement();
 		if(!basalelement->IsIceInElement() || !basalelement->IsFloating()) continue;
-		basalelement->inputs->GetInputValue(&boxid,BasalforcingsPicoBoxIdEnum);
-		basalelement->inputs->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
+		basalelement->GetInputValue(&boxid,BasalforcingsPicoBoxIdEnum);
+		basalelement->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
 		boxareas[basinid*maxbox+boxid]+=basalelement->GetHorizontalSurfaceArea();
 		basalelement->FindParam(&domaintype,DomainTypeEnum);
@@ -165,11 +166,11 @@
 		if(!basalelement->IsIceInElement() || !basalelement->IsFloating()) continue;
 		int el_boxid;
-		basalelement->inputs->GetInputValue(&el_boxid,BasalforcingsPicoBoxIdEnum);
+		basalelement->GetInputValue(&el_boxid,BasalforcingsPicoBoxIdEnum);
 		if(el_boxid!=boxid) continue;
 
-		Input* tocs_input=basalelement->GetInput(BasalforcingsPicoSubShelfOceanTempEnum); _assert_(tocs_input); 
-		Input* socs_input=basalelement->GetInput(BasalforcingsPicoSubShelfOceanSalinityEnum); _assert_(socs_input);
-
-		basalelement->inputs->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
+		Input2* tocs_input=basalelement->GetInput2(BasalforcingsPicoSubShelfOceanTempEnum); _assert_(tocs_input); 
+		Input2* socs_input=basalelement->GetInput2(BasalforcingsPicoSubShelfOceanSalinityEnum); _assert_(socs_input);
+
+		basalelement->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
 		Gauss* gauss=basalelement->NewGauss(1); gauss->GaussPoint(0);
 		tocs_input->GetInputValue(&toc,gauss);
@@ -206,10 +207,10 @@
 			if(!basalelement->IsIceInElement() || !basalelement->IsFloating()) continue;
 			int el_boxid;
-			basalelement->inputs->GetInputValue(&el_boxid,BasalforcingsPicoBoxIdEnum);
+			basalelement->GetInputValue(&el_boxid,BasalforcingsPicoBoxIdEnum);
 			if(el_boxid!=boxid) continue;
 
-	     	Input* overturnings_input=basalelement->GetInput(BasalforcingsPicoSubShelfOceanOverturningEnum); _assert_(overturnings_input);
-
-			basalelement->inputs->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
+	     	Input2* overturnings_input=basalelement->GetInput2(BasalforcingsPicoSubShelfOceanOverturningEnum); _assert_(overturnings_input);
+
+			basalelement->GetInputValue(&basinid,BasalforcingsPicoBasinIdEnum);
 			Gauss* gauss=basalelement->NewGauss(1); gauss->GaussPoint(0);
 			overturnings_input->GetInputValue(&overturning,gauss);
Index: /issm/trunk/src/c/modules/FloatingiceMeltingRatex/FloatingiceMeltingRatex.cpp
===================================================================
--- /issm/trunk/src/c/modules/FloatingiceMeltingRatex/FloatingiceMeltingRatex.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/FloatingiceMeltingRatex/FloatingiceMeltingRatex.cpp	(revision 24686)
@@ -6,4 +6,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "./../../classes/Inputs2/DatasetInput2.h"
 
 void FloatingiceMeltingRatex(FemModel* femmodel){/*{{{*/
@@ -102,6 +103,6 @@
 		if(!element->IsIceInElement() || !element->IsFloating() || !element->IsOnBase()){
 			IssmDouble* values = xNewZeroInit<IssmDouble>(numvertices);
-			element->AddInput(BasalforcingsFloatingiceMeltingRateEnum,values,P1Enum);
-			element->AddInput(BasalforcingsIsmip6TfShelfEnum,values,P1Enum);
+			element->AddInput2(BasalforcingsFloatingiceMeltingRateEnum,values,P1DGEnum);
+			element->AddInput2(BasalforcingsIsmip6TfShelfEnum,values,P1DGEnum);
 			xDelete<IssmDouble>(values);
 			continue;
@@ -109,7 +110,7 @@
 
 		/*Get TF on all vertices*/
-		IssmDouble* tf_test        = xNew<IssmDouble>(numvertices);
-		IssmDouble* depth_vertices = xNew<IssmDouble>(numvertices);
-		Input*      tf_input = element->GetInput(BasalforcingsIsmip6TfEnum); _assert_(tf_input);
+		IssmDouble*    tf_test        = xNew<IssmDouble>(numvertices);
+		IssmDouble*    depth_vertices = xNew<IssmDouble>(numvertices);
+		DatasetInput2* tf_input = element->GetDatasetInput2(BasalforcingsIsmip6TfEnum); _assert_(tf_input);
 
 		element->GetInputListOnVertices(&depth_vertices[0],BaseEnum);
@@ -148,5 +149,5 @@
 		}
 
-		element->AddInput(BasalforcingsIsmip6TfShelfEnum,tf_test,P1Enum);
+		element->AddInput2(BasalforcingsIsmip6TfShelfEnum,tf_test,P1DGEnum);
 		xDelete<IssmDouble>(tf_test);
 		xDelete<IssmDouble>(depth_vertices);
@@ -161,6 +162,6 @@
 			/*Spawn basal element if on base to compute element area*/
 			Element* basalelement = element->SpawnBasalElement();
-			Input* tf_input=basalelement->GetInput(BasalforcingsIsmip6TfShelfEnum); _assert_(tf_input);
-			basalelement->inputs->GetInputValue(&basinid,BasalforcingsIsmip6BasinIdEnum);
+			Input2* tf_input=basalelement->GetInput2(BasalforcingsIsmip6TfShelfEnum); _assert_(tf_input);
+			basalelement->GetInputValue(&basinid,BasalforcingsIsmip6BasinIdEnum);
 			Gauss* gauss=basalelement->NewGauss(1); gauss->GaussPoint(0);
 			tf_input->GetInputValue(&tf,gauss);
Index: /issm/trunk/src/c/modules/GetVectorFromControlInputsx/GetVectorFromControlInputsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/GetVectorFromControlInputsx/GetVectorFromControlInputsx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/GetVectorFromControlInputsx/GetVectorFromControlInputsx.cpp	(revision 24686)
@@ -102,7 +102,7 @@
 
 	/*Get active vector first*/
-	Vector<IssmDouble>*   activevector=NULL;
-	IssmPDouble*          vector=NULL;
-	int                   size;
+	Vector<IssmDouble> *activevector = NULL;
+	IssmPDouble        *vector       = NULL;
+	int                size;
 
 	/*Retrieve some parameters*/
Index: /issm/trunk/src/c/modules/GetVectorFromInputsx/GetVectorFromInputsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/GetVectorFromInputsx/GetVectorFromInputsx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/GetVectorFromInputsx/GetVectorFromInputsx.cpp	(revision 24686)
@@ -87,5 +87,5 @@
 	/*We go find the input of the first element, and query its interpolation type: */
 	Element* element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(0));
-	Input* input=element->GetInput(name); 
+	Input2* input=element->GetInput2(name); 
 	if (!input) _error_("could not find input: " << name);
 
Index: /issm/trunk/src/c/modules/InputDuplicatex/InputDuplicatex.cpp
===================================================================
--- /issm/trunk/src/c/modules/InputDuplicatex/InputDuplicatex.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/InputDuplicatex/InputDuplicatex.cpp	(revision 24686)
@@ -9,8 +9,4 @@
 
 void InputDuplicatex(FemModel* femmodel,int original_enum, int new_enum){
-	/*Go through elemnets, and ask to reinitialie the input: */
-	for(int i=0;i<femmodel->elements->Size();i++){
-		Element* element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
-		element->InputDuplicate(original_enum,new_enum);
-	}
+	femmodel->inputs2->DuplicateInput(original_enum,new_enum);
 }
Index: /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.cpp
===================================================================
--- /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.cpp	(revision 24686)
@@ -6,4 +6,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/Inputs2.h"
 
 void InputUpdateFromConstantx(FemModel* femmodel,bool constant, int name){
@@ -49,5 +50,5 @@
 	}
 }
-void InputUpdateFromConstantx(Elements* elements,IssmDouble constant, int name){
+void InputUpdateFromConstantx(Inputs2* inputs2,Elements* elements,IssmDouble constant, int name){
 
 	if(VerboseModule()) _printf0_("   Input updates from constant\n");
@@ -56,5 +57,30 @@
 	for(int i=0;i<elements->Size();i++){
 		Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
-		element->InputUpdateFromConstant(constant,name);
+		element->SetElementInput(inputs2,name,constant);
 	}
 }
+void InputUpdateFromConstantx(Inputs2* inputs2,Elements* elements,bool constant, int name){
+
+	if(VerboseModule()) _printf0_("   Input updates from constant\n");
+
+	/*Elements and loads drive the update: */
+	for(int i=0;i<elements->Size();i++){
+		Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
+		element->SetBoolInput(inputs2,name,constant);
+	}
+}
+#ifdef _HAVE_AD_
+void InputUpdateFromConstantx(Inputs2* inputs2,Elements* elements,IssmPDouble constant, int name){
+
+	if(VerboseModule()) _printf0_("   Input updates from constant\n");
+
+	/*Convert to active variable!*/
+	IssmDouble constant2 = constant;
+
+	/*Elements and loads drive the update: */
+	for(int i=0;i<elements->Size();i++){
+		Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(i));
+		element->SetElementInput(inputs2,name,constant2);
+	}
+}
+#endif
Index: /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.h
===================================================================
--- /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.h	(revision 24685)
+++ /issm/trunk/src/c/modules/InputUpdateFromConstantx/InputUpdateFromConstantx.h	(revision 24686)
@@ -7,4 +7,5 @@
 
 #include "../../classes/classes.h"
+class Inputs2;
 
 /* local prototypes: */
@@ -12,5 +13,9 @@
 void InputUpdateFromConstantx(FemModel* femmodel,int        constant,int name);
 void InputUpdateFromConstantx(FemModel* femmodel,IssmDouble constant,int name);
-void InputUpdateFromConstantx(Elements* elements,IssmDouble constant,int name);
+#ifdef _HAVE_AD_
+void InputUpdateFromConstantx(Inputs2* inputs2,Elements* elements,IssmPDouble constant,int name);
+#endif
+void InputUpdateFromConstantx(Inputs2* inputs2,Elements* elements,IssmDouble constant,int name);
+void InputUpdateFromConstantx(Inputs2* inputs2,Elements* elements,bool       constant,int name);
 
 #endif  /* _UPDATEINPUTSFROMCONSTANTXX_H */
Index: /issm/trunk/src/c/modules/KillIcebergsx/KillIcebergsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/KillIcebergsx/KillIcebergsx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/KillIcebergsx/KillIcebergsx.cpp	(revision 24686)
@@ -114,5 +114,5 @@
 				IssmDouble* values = xNew<IssmDouble>(numvertices);
 				for(int j=0;j<numvertices;j++) values[j] = 1.; /*Anything >0 = no ice*/
-				element->AddInput(MaskIceLevelsetEnum,values,P1Enum);
+				element->AddInput2(MaskIceLevelsetEnum,values,P1Enum);
 				xDelete<IssmDouble>(values);
 			}
Index: /issm/trunk/src/c/modules/MeshPartitionx/MeshPartitionx.h
===================================================================
--- /issm/trunk/src/c/modules/MeshPartitionx/MeshPartitionx.h	(revision 24685)
+++ /issm/trunk/src/c/modules/MeshPartitionx/MeshPartitionx.h	(revision 24686)
@@ -10,6 +10,6 @@
 /* local prototypes: */
 template <class doubletype> 
-int MeshPartitionx(int** pepart, int** pnpart, int numberofelements,int numberofnodes,int* elements,
-		int numberofelements2d,int numberofnodes2d,doubletype* elements2d,int numlayers,int elements_width, int meshelementtype,int num_procs){
+int MeshPartitionx(int** pepart,int** pnpart,int numberofelements,int numberofnodes,int* elements,
+		int numberofelements2d,int numberofnodes2d,doubletype* elements2d,int* vweights,int numlayers,int elements_width, int meshelementtype,int num_procs){
 
 	int noerr=1;
@@ -26,8 +26,4 @@
 	int  count=0;
 
-	int  etype=1; //tria mesh see metis/Programs/Io.c
-	int  etype2d=1; //tria mesh see metis/Programs/Io.c
-	int  numflag=0;
-	int  edgecut=1;
 
 	switch(meshelementtype){
@@ -46,5 +42,5 @@
 			if (num_procs>1){
 #ifdef _HAVE_METIS_
-				METIS_PartMeshNodalPatch(&numberofelements,&numberofnodes, index, &etype, &numflag, &num_procs, &edgecut, epart, npart);
+				METIS_PartMeshNodalPatch(numberofelements,numberofnodes,index,vweights,num_procs,epart, npart);
 #else
 				_error_("metis has not beed installed. Cannot run with more than 1 cpu");
@@ -75,5 +71,5 @@
 			if (num_procs>1){
 #ifdef _HAVE_METIS_
-				METIS_PartMeshNodalPatch(&numberofelements2d,&numberofnodes2d, index2d, &etype2d, &numflag, &num_procs, &edgecut, epart2d, npart2d);
+				METIS_PartMeshNodalPatch(numberofelements2d,numberofnodes2d,index2d,vweights,num_procs,epart2d,npart2d);
 #else
 				_error_("metis has not beed installed. Cannot run with more than 1 cpu");
Index: /issm/trunk/src/c/modules/ModelProcessorx/Control/UpdateElementsAndMaterialsControl.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/Control/UpdateElementsAndMaterialsControl.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/Control/UpdateElementsAndMaterialsControl.cpp	(revision 24686)
@@ -9,14 +9,24 @@
 #include "../ModelProcessorx.h"
 
-void	UpdateElementsAndMaterialsControl(Elements* elements,Parameters* parameters,Materials* materials, IoModel* iomodel){
+void	UpdateElementsAndMaterialsControl(Elements* elements,Parameters* parameters,Inputs2* inputs2,Materials* materials, IoModel* iomodel){
 	/*Intermediary*/
 	bool       control_analysis;
+	int        M,N;
 	int        control,cost_function,domaintype;
 	int        num_controls,num_cost_functions;
-	Element   *element          = NULL;
-	Material  *material         = NULL;
-	int       *control_enums    = NULL;
-	char     **controls         = NULL;
-	char     **cost_functions   = NULL;
+	IssmDouble yts,scale;
+	Element     *element          = NULL;
+	Material    *material         = NULL;
+	int         *control_enums    = NULL;
+	char       **controls         = NULL;
+	char       **cost_functions   = NULL;
+	IssmDouble  *independent      = NULL;
+	IssmDouble  *independents_min = NULL;
+	IssmDouble  *independents_max = NULL;
+	IssmDouble  *weights          = NULL;
+
+	/*Fetch parameters: */
+	iomodel->FindConstant(&control_analysis,"md.inversion.iscontrol");
+	if(!control_analysis) return;
 
 	/*Fetch parameters: */
@@ -24,16 +34,10 @@
 	iomodel->FindConstant(&isautodiff,"md.autodiff.isautodiff");
 	if(isautodiff){
-		UpdateElementsAndMaterialsControlAD(elements,parameters,materials,iomodel);
+		UpdateElementsAndMaterialsControlAD(elements,parameters,inputs2,materials,iomodel);
 		return;
 	}
 
-	/*Fetch parameters: */
-	iomodel->FindConstant(&control_analysis,"md.inversion.iscontrol");
-	if(control_analysis) iomodel->FindConstant(&num_controls,"md.inversion.num_control_parameters");
-
-	/*Now, return if no control*/
-	if(!control_analysis) return;
-
 	/*Process controls and convert from string to enums*/
+	iomodel->FindConstant(&num_controls,"md.inversion.num_control_parameters");
 	iomodel->FindConstant(&controls,&num_controls,"md.inversion.control_parameters");
 	if(num_controls<1) _error_("no controls found");
@@ -52,13 +56,23 @@
 	}
 
-	iomodel->FetchData(3,"md.inversion.cost_functions_coefficients","md.inversion.min_parameters","md.inversion.max_parameters");
-
-	/*Fetch Observations */
+	/*Fetch Observations and add to inputs*/
 	iomodel->FindConstant(&domaintype,"md.mesh.domain_type");
+	iomodel->FindConstant(&yts,"md.constants.yts");
+	iomodel->FetchData(&weights,&M,&N,"md.inversion.cost_functions_coefficients");
+
+	/*Transpose weights for simplicity!*/
+	if(M*N && N>1){
+		IssmDouble* weights_transp = xNew<IssmDouble>(M*N);
+		for(int i=0;i<M;i++) for(int j=0;j<N;j++) weights_transp[j*M+i] = weights[i*N+j];
+		xDelete<IssmDouble>(weights);
+		weights = weights_transp;
+	}
+
+	if(M!=iomodel->numberofvertices && N!=num_cost_functions) _error_("not supported");
 	for(int i=0;i<num_cost_functions;i++){
 		cost_function=cost_function_enums[i];
-		if(     cost_function==ThicknessAbsMisfitEnum) iomodel->FetchDataToInput(elements,"md.inversion.thickness_obs",InversionThicknessObsEnum);
-		else if(cost_function==SurfaceAbsMisfitEnum)   iomodel->FetchDataToInput(elements,"md.inversion.surface_obs",InversionSurfaceObsEnum);
-		else if(cost_function==RheologyBInitialguessMisfitEnum) iomodel->FetchDataToInput(elements,"md.materials.rheology_B",RheologyBInitialguessEnum);
+		if(     cost_function==ThicknessAbsMisfitEnum) iomodel->FetchDataToInput(inputs2,elements,"md.inversion.thickness_obs",InversionThicknessObsEnum);
+		else if(cost_function==SurfaceAbsMisfitEnum)   iomodel->FetchDataToInput(inputs2,elements,"md.inversion.surface_obs",InversionSurfaceObsEnum);
+		else if(cost_function==RheologyBInitialguessMisfitEnum) iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",RheologyBInitialguessEnum);
 		else if(cost_function==SurfaceAbsVelMisfitEnum
 			  || cost_function==SurfaceRelVelMisfitEnum
@@ -66,41 +80,73 @@
 			  || cost_function==SurfaceLogVxVyMisfitEnum
 			  || cost_function==SurfaceAverageVelMisfitEnum){
-			iomodel->FetchDataToInput(elements,"md.inversion.vx_obs",InversionVxObsEnum);
-			if(domaintype!=Domain2DverticalEnum) iomodel->FetchDataToInput(elements,"md.inversion.vy_obs",InversionVyObsEnum); 
+			iomodel->FetchDataToInput(inputs2,elements,"md.inversion.vx_obs",InversionVxObsEnum);
+			if(domaintype!=Domain2DverticalEnum) iomodel->FetchDataToInput(inputs2,elements,"md.inversion.vy_obs",InversionVyObsEnum); 
+		}
+		for(int j=0;j<elements->Size();j++){
+			Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(j));
+			element->DatasetInputAdd(InversionCostFunctionsCoefficientsEnum,&weights[i*iomodel->numberofvertices],inputs2,iomodel,M,1,1,cost_function,7,cost_function);
 		}
 	}
 	parameters->AddObject(new IntParam(ControlInputSizeMEnum,iomodel->numberofvertices));
+	xDelete<IssmDouble>(weights);
+
+	/*Get controls*/
+	iomodel->FetchData(&independents_min,&M,&N,"md.inversion.min_parameters");
+	if(M!=iomodel->numberofvertices && N!=num_controls) _error_("not supported");
+	iomodel->FetchData(&independents_max,&M,&N,"md.inversion.max_parameters");
+	if(M!=iomodel->numberofvertices && N!=num_controls) _error_("not supported");
+
+	/*Transpose weights for simplicity!*/
+	if(M*N && N>1){
+		IssmDouble* independents_min_transp = xNew<IssmDouble>(M*N);
+		for(int i=0;i<M;i++) for(int j=0;j<N;j++) independents_min_transp[j*M+i] = independents_min[i*N+j];
+		xDelete<IssmDouble>(independents_min);
+		independents_min = independents_min_transp;
+
+		IssmDouble* independents_max_transp = xNew<IssmDouble>(M*N);
+		for(int i=0;i<M;i++) for(int j=0;j<N;j++) independents_max_transp[j*M+i] = independents_max[i*N+j];
+		xDelete<IssmDouble>(independents_max);
+		independents_max = independents_max_transp;
+	}
 
 	for(int i=0;i<num_controls;i++){
 		control = control_enums[i];
+		scale   = 1.;
+
 		switch(control){
 			/*List of supported controls*/
-			case BalancethicknessThickeningRateEnum:      iomodel->FetchData(1,"md.balancethickness.thickening_rate"); break;
-			case BalancethicknessSpcthicknessEnum:        iomodel->FetchData(1,"md.balancethickness.spcthickness"); break;
-			case VxEnum:                                  iomodel->FetchData(1,"md.initialization.vx"); break;
-			case VyEnum:                                  iomodel->FetchData(1,"md.initialization.vy"); break;
-			case ThicknessEnum:                           iomodel->FetchData(1,"md.geometry.thickness"); break;
-			case FrictionCoefficientEnum:                 iomodel->FetchData(1,"md.friction.coefficient"); break;
-			case FrictionAsEnum:                          iomodel->FetchData(1,"md.friction.As"); break;
-			case BalancethicknessApparentMassbalanceEnum: iomodel->FetchData(1,"md.balancethickness.apparent_massbalance"); break;
-			case BalancethicknessOmegaEnum:               iomodel->FetchData(1,"md.balancethickness.omega"); break;
-			case MaterialsRheologyBEnum:                  iomodel->FetchData(1,"md.materials.rheology_B"); break;
+			case BalancethicknessThickeningRateEnum:      iomodel->FetchData(&independent,&M,&N,"md.balancethickness.thickening_rate");scale = 1./yts; break; 
+			case BalancethicknessSpcthicknessEnum:        iomodel->FetchData(&independent,&M,&N,"md.balancethickness.spcthickness");                   break; 
+			case VxEnum:                                  iomodel->FetchData(&independent,&M,&N,"md.initialization.vx");scale = 1./yts;                break; 
+			case VyEnum:                                  iomodel->FetchData(&independent,&M,&N,"md.initialization.vy");scale = 1./yts;                break; 
+			case ThicknessEnum:                           iomodel->FetchData(&independent,&M,&N,"md.geometry.thickness");                              break; 
+			case FrictionCoefficientEnum:                 iomodel->FetchData(&independent,&M,&N,"md.friction.coefficient");                            break; 
+			case FrictionAsEnum:                          iomodel->FetchData(&independent,&M,&N,"md.friction.As");                                     break; 
+			case BalancethicknessApparentMassbalanceEnum: iomodel->FetchData(&independent,&M,&N,"md.balancethickness.apparent_massbalance");           break; 
+			case BalancethicknessOmegaEnum:               iomodel->FetchData(&independent,&M,&N,"md.balancethickness.omega");                          break; 
+			case MaterialsRheologyBEnum:                  iomodel->FetchData(&independent,&M,&N,"md.materials.rheology_B");                            break; 
 			/*Special cases*/
-			case MaterialsRheologyBbarEnum: iomodel->FetchData(1,"md.materials.rheology_B"); break;
-			case DamageDbarEnum:            iomodel->FetchData(1,"md.damage.D");            break;
+			case MaterialsRheologyBbarEnum:               iomodel->FetchData(&independent,&M,&N,"md.materials.rheology_B");                            break; 
+			case DamageDbarEnum:                          iomodel->FetchData(&independent,&M,&N,"md.damage.D");                                        break; 
 			default:
 				_error_("Control " << EnumToStringx(control) << " not implemented yet");
 		}
-	}
-
-	/*Update elements: */
-	int counter=0;
-	for(int i=0;i<iomodel->numberofelements;i++){
-		if(iomodel->my_elements[i]){
-			element=(Element*)elements->GetObjectByOffset(counter);
-			element->InputUpdateFromIoModel(i,iomodel); //we need i to index into elements.
-			counter++;
-		}
-	}
+		if(M!=iomodel->numberofvertices && N!=1) _error_("not supported yet");
+
+		/*Special case if 3d*/
+		if(iomodel->domaintype==Domain3DEnum){
+			if(control==MaterialsRheologyBbarEnum) control=MaterialsRheologyBEnum;
+			if(control==DamageDbarEnum)            control=DamageDEnum;
+		}
+
+		for(int j=0;j<elements->Size();j++){
+			Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(j));
+			element->ControlInputCreate(independent,&independents_min[i*iomodel->numberofvertices],&independents_max[i*iomodel->numberofvertices],inputs2,iomodel,M,N,scale,control,i+1);
+		}
+		xDelete<IssmDouble>(independent);
+	}
+	xDelete<IssmDouble>(independents_min);
+	xDelete<IssmDouble>(independents_max);
+
 
 	/*Free data: */
@@ -126,5 +172,4 @@
 	}
 
-	iomodel->DeleteData(3,"md.inversion.cost_functions_coefficients","md.inversion.min_parameters","md.inversion.max_parameters");
 	xDelete<int>(control_enums);
 	xDelete<int>(cost_function_enums);
@@ -134,5 +179,5 @@
 	xDelete<char*>(controls);
 }
-void UpdateElementsAndMaterialsControlAD(Elements* elements,Parameters* parameters,Materials* materials, IoModel* iomodel){
+void UpdateElementsAndMaterialsControlAD(Elements* elements,Parameters* parameters,Inputs2* inputs2,Materials* materials, IoModel* iomodel){
 
 	#if defined(_HAVE_AD_)
@@ -206,5 +251,5 @@
 			for(int j=0;j<elements->Size();j++){
 				Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(j));
-				element->ControlInputCreate(independent,independents_min,independents_max,iomodel,M,N,input_enum,i+1);
+				element->ControlInputCreate(independent,independents_min,independents_max,inputs2,iomodel,M,N,1.,input_enum,i+1);
 			}
 			xDelete<IssmDouble>(independent);
Index: /issm/trunk/src/c/modules/ModelProcessorx/CreateElementsVerticesAndMaterials.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/CreateElementsVerticesAndMaterials.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/CreateElementsVerticesAndMaterials.cpp	(revision 24686)
@@ -48,14 +48,22 @@
 	/*Create elements*/
 	if(control_analysis && !adolc_analysis)iomodel->FetchData(2,"md.inversion.min_parameters","md.inversion.max_parameters");
-
+	if(iomodel->domaintype==Domain2DverticalEnum || iomodel->domaindim==3)  iomodel->FetchData(2,"md.mesh.vertexonbase","md.mesh.vertexonsurface");
+
+	int count = 0;
 	switch(iomodel->meshelementtype){
 		case TriaEnum:
 			for(int i=0;i<iomodel->numberofelements;i++){
-				if(iomodel->my_elements[i]) elements->AddObject(new Tria(i+1,i,iomodel,nummodels));
+				if(iomodel->my_elements[i]){
+					elements->AddObject(new Tria(i+1,i,count,iomodel,nummodels));
+					count++;
+				}
 			}
 			break;
 		case TetraEnum:
 			for(int i=0;i<iomodel->numberofelements;i++){
-				if(iomodel->my_elements[i]) elements->AddObject(new Tetra(i+1,i,iomodel,nummodels));
+				if(iomodel->my_elements[i]){
+					elements->AddObject(new Tetra(i+1,i,count,iomodel,nummodels));
+					count++;
+				}
 			}
 			break;
@@ -63,5 +71,8 @@
 			iomodel->FetchData(2,"md.mesh.upperelements","md.mesh.lowerelements");
 			for(int i=0;i<iomodel->numberofelements;i++){
-				if(iomodel->my_elements[i]) elements->AddObject(new Penta(i+1,i,iomodel,nummodels));
+				if(iomodel->my_elements[i]){
+					elements->AddObject(new Penta(i+1,i,count,iomodel,nummodels));
+					count++;
+				}
 			}
 			break;
@@ -71,7 +82,8 @@
 
 	/*Free data: */
-	iomodel->DeleteData(4,"md.mesh.upperelements","md.mesh.lowerelements","md.inversion.min_parameters","md.inversion.max_parameters");
-}/*}}}*/
-void CreateMaterials(Elements* elements,Materials* materials,IoModel* iomodel,const int nummodels){/*{{{*/
+	iomodel->DeleteData(6,"md.mesh.upperelements","md.mesh.lowerelements","md.inversion.min_parameters","md.inversion.max_parameters","md.mesh.vertexonbase","md.mesh.vertexonsurface");
+
+}/*}}}*/
+void CreateMaterials(Elements* elements,Inputs2* inputs2,Materials* materials,IoModel* iomodel,const int nummodels){/*{{{*/
 
 	/*Intermediary*/
@@ -84,5 +96,5 @@
 	iomodel->FindConstant(&materials_type,"md.materials.type");
 
-	/*Did we already create the materiaas? : */
+	/*Did we already create the materials? : */
 	_assert_(materials->Size()==0);
 
@@ -90,10 +102,10 @@
 	switch(materials_type){
 		case MaticeEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
 			for (i=0;i<iomodel->numberofelements;i++) if(iomodel->my_elements[i]) materials->AddObject(new Matice(i+1,i,iomodel));
 			switch(iomodel->domaindim){
 				case 2:
-					elements->InputDuplicate(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
+					inputs2->DuplicateInput(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
 					break;
 				case 3:
@@ -104,12 +116,12 @@
 			break;
 		case MatenhancediceEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_E",MaterialsRheologyEEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_E",MaterialsRheologyEEnum);
 			for (i=0;i<iomodel->numberofelements;i++) if(iomodel->my_elements[i]) materials->AddObject(new Matice(i+1,i,iomodel));
 			switch(iomodel->domaindim){
 				case 2:
-					elements->InputDuplicate(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
-					elements->InputDuplicate(MaterialsRheologyEEnum,MaterialsRheologyEbarEnum);
+					inputs2->DuplicateInput(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
+					inputs2->DuplicateInput(MaterialsRheologyEEnum,MaterialsRheologyEbarEnum);
 					break;
 				case 3:
@@ -120,12 +132,12 @@
 			break;
 		case MatdamageiceEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
-			iomodel->FetchDataToInput(elements,"md.damage.D",DamageDEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.damage.D",DamageDEnum);
 			for (i=0;i<iomodel->numberofelements;i++) if(iomodel->my_elements[i]) materials->AddObject(new Matice(i+1,i,iomodel));
 			switch(iomodel->domaindim){
 				case 2:
-					elements->InputDuplicate(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
-					elements->InputDuplicate(DamageDEnum,DamageDbarEnum);
+					inputs2->DuplicateInput(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
+					inputs2->DuplicateInput(DamageDEnum,DamageDbarEnum);
 					break;
 				case 3:
@@ -136,13 +148,13 @@
 			break;
 		case MatestarEnum:
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_Ec",MaterialsRheologyEcEnum);
-			iomodel->FetchDataToInput(elements,"md.materials.rheology_Es",MaterialsRheologyEsEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_Ec",MaterialsRheologyEcEnum);
+			iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_Es",MaterialsRheologyEsEnum);
 			for(i=0;i<iomodel->numberofelements;i++) if(iomodel->my_elements[i]) materials->AddObject(new Matestar(i+1,i,iomodel));
 			switch(iomodel->domaindim){
 				case 2:
-					elements->InputDuplicate(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
-					elements->InputDuplicate(MaterialsRheologyEcEnum,MaterialsRheologyEcbarEnum);
-					elements->InputDuplicate(MaterialsRheologyEsEnum,MaterialsRheologyEsbarEnum);
+					inputs2->DuplicateInput(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
+					inputs2->DuplicateInput(MaterialsRheologyEcEnum,MaterialsRheologyEcbarEnum);
+					inputs2->DuplicateInput(MaterialsRheologyEsEnum,MaterialsRheologyEsbarEnum);
 					break;
 				case 3:
@@ -170,10 +182,10 @@
 				switch(IoCodeToEnumMaterials(nature[i])){ //{{{
 					case MaticeEnum:
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
 						for (i=0;i<iomodel->numberofelements;i++) if(iomodel->my_elements[i]) materials->AddObject(new Matice(i+1,i,iomodel));
 						switch(iomodel->domaindim){
 							case 2:
-								elements->InputDuplicate(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
+								inputs2->DuplicateInput(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
 								break;
 							case 3:
@@ -190,12 +202,12 @@
 
 					case MatenhancediceEnum:
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_E",MaterialsRheologyEEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_E",MaterialsRheologyEEnum);
 						for (i=0;i<iomodel->numberofelements;i++) if(iomodel->my_elements[i]) materials->AddObject(new Matice(i+1,i,iomodel));
 						switch(iomodel->domaindim){
 							case 2:
-								elements->InputDuplicate(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
-								elements->InputDuplicate(MaterialsRheologyEEnum,MaterialsRheologyEbarEnum);
+								inputs2->DuplicateInput(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
+								inputs2->DuplicateInput(MaterialsRheologyEEnum,MaterialsRheologyEbarEnum);
 								break;
 							case 3:
@@ -206,12 +218,12 @@
 						break;
 					case MatdamageiceEnum:
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
-						iomodel->FetchDataToInput(elements,"md.damage.D",DamageDEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_n",MaterialsRheologyNEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.damage.D",DamageDEnum);
 						for (i=0;i<iomodel->numberofelements;i++) if(iomodel->my_elements[i]) materials->AddObject(new Matice(i+1,i,iomodel));
 						switch(iomodel->domaindim){
 							case 2:
-								elements->InputDuplicate(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
-								elements->InputDuplicate(DamageDEnum,DamageDbarEnum);
+								inputs2->DuplicateInput(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
+								inputs2->DuplicateInput(DamageDEnum,DamageDbarEnum);
 								break;
 							case 3:
@@ -222,13 +234,13 @@
 						break;
 					case MatestarEnum:
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_Ec",MaterialsRheologyEcEnum);
-						iomodel->FetchDataToInput(elements,"md.materials.rheology_Es",MaterialsRheologyEsEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_B",MaterialsRheologyBEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_Ec",MaterialsRheologyEcEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.materials.rheology_Es",MaterialsRheologyEsEnum);
 						for(i=0;i<iomodel->numberofelements;i++) if(iomodel->my_elements[i]) materials->AddObject(new Matestar(i+1,i,iomodel));
 						switch(iomodel->domaindim){
 							case 2:
-								elements->InputDuplicate(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
-								elements->InputDuplicate(MaterialsRheologyEcEnum,MaterialsRheologyEcbarEnum);
-								elements->InputDuplicate(MaterialsRheologyEsEnum,MaterialsRheologyEsbarEnum);
+								inputs2->DuplicateInput(MaterialsRheologyBEnum,MaterialsRheologyBbarEnum);
+								inputs2->DuplicateInput(MaterialsRheologyEcEnum,MaterialsRheologyEcbarEnum);
+								inputs2->DuplicateInput(MaterialsRheologyEsEnum,MaterialsRheologyEsbarEnum);
 								break;
 							case 3:
@@ -405,5 +417,4 @@
 		iomodel->FetchData(6,"md.mesh.x","md.mesh.y","md.mesh.z","md.geometry.base","md.geometry.thickness","md.mask.ice_levelset");
 		if (iomodel->domaintype == Domain3DsurfaceEnum) iomodel->FetchData(3,"md.mesh.lat","md.mesh.long","md.mesh.r");
-		else iomodel->FetchDataToInput(elements,"md.mesh.scale_factor",MeshScaleFactorEnum,1.);
 		if (isoceancoupling) iomodel->FetchData(2,"md.mesh.lat","md.mesh.long");
 
@@ -453,4 +464,4 @@
 
 	/*Finalize Initialization*/
-	vertices->Finalize();
-}/*}}}*/
+	vertices->Finalize(iomodel);
+}/*}}}*/
Index: /issm/trunk/src/c/modules/ModelProcessorx/CreateOutputDefinitions.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/CreateOutputDefinitions.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/CreateOutputDefinitions.cpp	(revision 24686)
@@ -8,5 +8,5 @@
 #include "./ModelProcessorx.h"
 
-void CreateOutputDefinitions(Elements* elements, Parameters* parameters,IoModel* iomodel){
+void CreateOutputDefinitions(Elements* elements,Parameters* parameters,Inputs2* inputs2,IoModel* iomodel){
 
 	int i,j;
@@ -120,6 +120,6 @@
 					for(int k=0;k<elements->Size();k++){
 						Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(k));
-						element->InputCreate(misfit_observation_s[j], iomodel,misfit_observation_M_s[j],misfit_observation_N_s[j],obs_vector_type,StringToEnumx(misfit_observation_string_s[j]),7);
-						element->InputCreate(misfit_weights_s[j], iomodel,misfit_weights_M_s[j],misfit_weights_N_s[j],weight_vector_type,StringToEnumx(misfit_weights_string_s[j]),7);
+						element->InputCreate(misfit_observation_s[j],inputs2,iomodel,misfit_observation_M_s[j],misfit_observation_N_s[j],obs_vector_type,StringToEnumx(misfit_observation_string_s[j]),7);
+						element->InputCreate(misfit_weights_s[j],inputs2,iomodel,misfit_weights_M_s[j],misfit_weights_N_s[j],weight_vector_type,StringToEnumx(misfit_weights_string_s[j]),7);
 					}
 
@@ -130,5 +130,4 @@
 					char* string=NULL;
 					IssmDouble* matrix = NULL;
-
 					string = misfit_definitionstring_s[j];		xDelete<char>(string);
 					string = misfit_observation_string_s[j];	xDelete<char>(string);
@@ -210,9 +209,7 @@
 					/*Now, for this particular cfsurfacesquare object, make sure we plug into the elements: the observation, and the weights.*/
 					for(int k=0;k<elements->Size();k++){
-
 						Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(k));
-
-						element->DatasetInputAdd(StringToEnumx(cfsurfacesquare_definitionstring_s[j]),cfsurfacesquare_observation_s[j], iomodel,cfsurfacesquare_observation_M_s[j],cfsurfacesquare_observation_N_s[j],obs_vector_type,StringToEnumx(cfsurfacesquare_observation_string_s[j]),7,SurfaceObservationEnum);
-						element->DatasetInputAdd(StringToEnumx(cfsurfacesquare_definitionstring_s[j]),cfsurfacesquare_weights_s[j], iomodel,cfsurfacesquare_weights_M_s[j],cfsurfacesquare_weights_N_s[j],weight_vector_type,StringToEnumx(cfsurfacesquare_weights_string_s[j]),7,WeightsSurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfsurfacesquare_definitionstring_s[j]),cfsurfacesquare_observation_s[j],inputs2,iomodel,cfsurfacesquare_observation_M_s[j],cfsurfacesquare_observation_N_s[j],obs_vector_type,StringToEnumx(cfsurfacesquare_observation_string_s[j]),7,SurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfsurfacesquare_definitionstring_s[j]),cfsurfacesquare_weights_s[j],inputs2,iomodel,cfsurfacesquare_weights_M_s[j],cfsurfacesquare_weights_N_s[j],weight_vector_type,StringToEnumx(cfsurfacesquare_weights_string_s[j]),7,WeightsSurfaceObservationEnum);
 
 					}
@@ -285,5 +282,5 @@
 						Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(k));
 
-						element->DatasetInputAdd(StringToEnumx(cfdragcoeffabsgrad_definitionstring_s[j]),cfdragcoeffabsgrad_weights_s[j], iomodel,cfdragcoeffabsgrad_weights_M_s[j],cfdragcoeffabsgrad_weights_N_s[j],weight_vector_type,StringToEnumx(cfdragcoeffabsgrad_weights_string_s[j]),7,WeightsSurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfdragcoeffabsgrad_definitionstring_s[j]),cfdragcoeffabsgrad_weights_s[j],inputs2,iomodel,cfdragcoeffabsgrad_weights_M_s[j],cfdragcoeffabsgrad_weights_N_s[j],weight_vector_type,StringToEnumx(cfdragcoeffabsgrad_weights_string_s[j]),7,WeightsSurfaceObservationEnum);
 
 					}
@@ -368,7 +365,7 @@
 						Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(k));
 
-						element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_vxobs[j], iomodel,cfsurfacelogvel_observation_M[j],cfsurfacelogvel_observation_N[j],obs_vector_type,StringToEnumx(cfsurfacelogvel_vxobs_string[j]),7,VxObsEnum);
-							element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_vyobs[j], iomodel,cfsurfacelogvel_observation_M[j],cfsurfacelogvel_observation_N[j],obs_vector_type,StringToEnumx(cfsurfacelogvel_vyobs_string[j]),7,VyObsEnum);
-						element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_weights[j], iomodel,cfsurfacelogvel_weights_M[j],cfsurfacelogvel_weights_N[j],weight_vector_type,StringToEnumx(cfsurfacelogvel_weightstring[j]),7,WeightsSurfaceObservationEnum);
+						element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_vxobs[j],inputs2,iomodel,cfsurfacelogvel_observation_M[j],cfsurfacelogvel_observation_N[j],obs_vector_type,StringToEnumx(cfsurfacelogvel_vxobs_string[j]),7,VxObsEnum);
+							element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_vyobs[j],inputs2,iomodel,cfsurfacelogvel_observation_M[j],cfsurfacelogvel_observation_N[j],obs_vector_type,StringToEnumx(cfsurfacelogvel_vyobs_string[j]),7,VyObsEnum);
+						element->DatasetInputAdd(StringToEnumx(cfsurfacelogvel_definitionstring[j]),cfsurfacelogvel_weights[j],inputs2,iomodel,cfsurfacelogvel_weights_M[j],cfsurfacelogvel_weights_N[j],weight_vector_type,StringToEnumx(cfsurfacelogvel_weightstring[j]),7,WeightsSurfaceObservationEnum);
 
 					}
@@ -594,6 +591,6 @@
 				for(int i=0;i<num_cost_functions;i++){
 					cost_function=cost_function_enums[i];
-					if(     cost_function==ThicknessAbsMisfitEnum) iomodel->FetchDataToInput(elements,"md.numberedcostfunction.thickness_obs",InversionThicknessObsEnum);
-					else if(cost_function==SurfaceAbsMisfitEnum)   iomodel->FetchDataToInput(elements,"md.numberedcostfunction.surface_obs",InversionSurfaceObsEnum);
+					if(     cost_function==ThicknessAbsMisfitEnum) iomodel->FetchDataToInput(inputs2,elements,"md.numberedcostfunction.thickness_obs",InversionThicknessObsEnum);
+					else if(cost_function==SurfaceAbsMisfitEnum)   iomodel->FetchDataToInput(inputs2,elements,"md.numberedcostfunction.surface_obs",InversionSurfaceObsEnum);
 					else if(cost_function==SurfaceAbsVelMisfitEnum
 							|| cost_function==SurfaceRelVelMisfitEnum
@@ -601,6 +598,6 @@
 							|| cost_function==SurfaceLogVxVyMisfitEnum
 							|| cost_function==SurfaceAverageVelMisfitEnum){
-						iomodel->FetchDataToInput(elements,"md.numberedcostfunction.vx_obs",InversionVxObsEnum);
-						if(domaintype!=Domain2DverticalEnum) iomodel->FetchDataToInput(elements,"md.numberedcostfunction.vy_obs",InversionVyObsEnum);
+						iomodel->FetchDataToInput(inputs2,elements,"md.numberedcostfunction.vx_obs",InversionVxObsEnum);
+						if(domaintype!=Domain2DverticalEnum) iomodel->FetchDataToInput(inputs2,elements,"md.numberedcostfunction.vy_obs",InversionVyObsEnum);
 					}
 				}
@@ -611,5 +608,5 @@
 					for(int k=0;k<elements->Size();k++){
 						Element* element=xDynamicCast<Element*>(elements->GetObjectByOffset(k));
-						element->DatasetInputCreate(cost_functions_weights[j],cost_functions_weights_M[j],cost_functions_weights_N[j],cost_function_enums,num_cost_functions,iomodel,InversionCostFunctionsCoefficientsEnum);
+						element->DatasetInputCreate(cost_functions_weights[j],cost_functions_weights_M[j],cost_functions_weights_N[j],cost_function_enums,num_cost_functions,inputs2,iomodel,InversionCostFunctionsCoefficientsEnum);
 					}
 					output_definitions->AddObject(new Numberedcostfunction(ncf_name_s[j],StringToEnumx(ncf_definitionstring_s[j]),num_cost_functions,cost_function_enums));
Index: /issm/trunk/src/c/modules/ModelProcessorx/CreateParameters.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/CreateParameters.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/CreateParameters.cpp	(revision 24686)
@@ -223,5 +223,4 @@
 			parameters->AddObject(iomodel->CopyConstantObject("md.basalforcings.num_basins",BasalforcingsPicoNumBasinsEnum));
 			parameters->AddObject(iomodel->CopyConstantObject("md.basalforcings.maxboxcount",BasalforcingsPicoMaxboxcountEnum));
-			parameters->AddObject(iomodel->CopyConstantObject("md.basalforcings.overturning_coeff",BasalforcingsPicoOverturningCoeffEnum));
 			parameters->AddObject(iomodel->CopyConstantObject("md.basalforcings.gamma_T",BasalforcingsPicoGammaTEnum));
 			parameters->AddObject(iomodel->CopyConstantObject("md.basalforcings.isplume",BasalforcingsPicoIsplumeEnum));
Index: /issm/trunk/src/c/modules/ModelProcessorx/Dakota/CreateParametersDakota.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/Dakota/CreateParametersDakota.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/Dakota/CreateParametersDakota.cpp	(revision 24686)
@@ -40,4 +40,6 @@
 
 	if(dakota_analysis){
+
+		parameters->AddObject(iomodel->CopyConstantObject("md.qmu.output",QmuOutputEnum));
 
 		iomodel->FindConstant(&name,"md.miscellaneous.name");
Index: /issm/trunk/src/c/modules/ModelProcessorx/Dakota/UpdateElementsAndMaterialsDakota.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/Dakota/UpdateElementsAndMaterialsDakota.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/Dakota/UpdateElementsAndMaterialsDakota.cpp	(revision 24686)
@@ -9,5 +9,5 @@
 #include "../ModelProcessorx.h"
 
-void	UpdateElementsAndMaterialsDakota(Elements* elements,Materials* materials, IoModel* iomodel){
+void	UpdateElementsAndMaterialsDakota(Elements* elements,Inputs2* inputs2,Materials* materials, IoModel* iomodel){
 
 	/*recover parameters: */
@@ -15,4 +15,4 @@
 	iomodel->FindConstant(&dakota_analysis,"md.qmu.isdakota");
 
-	if(dakota_analysis) iomodel->FetchDataToInput(elements,"md.geometry.hydrostatic_ratio",GeometryHydrostaticRatioEnum,0.);
+	if(dakota_analysis) iomodel->FetchDataToInput(inputs2,elements,"md.geometry.hydrostatic_ratio",GeometryHydrostaticRatioEnum,0.);
 }
Index: /issm/trunk/src/c/modules/ModelProcessorx/ElementsAndVerticesPartitioning.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/ElementsAndVerticesPartitioning.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/ElementsAndVerticesPartitioning.cpp	(revision 24686)
@@ -60,8 +60,35 @@
 	}
 
+	/*Use ice levelset for weights*/
+	int fordan = 0;
+	int* weights = NULL;
+	if(fordan){
+		IssmDouble* icelevelset = NULL;
+		iomodel->FetchData(&icelevelset,NULL,NULL,"md.mask.ice_levelset");
+
+		weights = xNew<int>(iomodel->numberofvertices);
+		for(int i=0;i<iomodel->numberofvertices;i++){
+			if(icelevelset[i]>=0) weights[i] = 1;
+			if(icelevelset[i]<0)  weights[i] = 100;
+		}
+		xDelete<IssmDouble>(icelevelset);
+	}
+
 	/*Partition and free resouces*/
-	MeshPartitionx(&epart,&npart,iomodel->numberofelements,iomodel->numberofvertices,iomodel->elements,numberofelements2d,numberofvertices2d,elements2d,numlayers,elements_width,iomodel->meshelementtype,num_procs);
+	MeshPartitionx(&epart,&npart,iomodel->numberofelements,iomodel->numberofvertices,iomodel->elements,numberofelements2d,numberofvertices2d,elements2d,weights,numlayers,elements_width,iomodel->meshelementtype,num_procs);
 	xDelete<int>(elements2d);
 	xDelete<int>(npart);
+	xDelete<int>(weights);
+
+	if(fordan){
+		for(int i=0;i<IssmComm::GetSize();i++){
+			if(i==IssmComm::GetRank()){
+				int temp =0;
+				for(int j=0;j<iomodel->numberofelements;j++) if(epart[j]==i) temp++;
+				_printf_("Partition #"<<i<<" number of elements: "<<temp<<"\n");
+			}
+			ISSM_MPI_Barrier(IssmComm::GetComm());
+		}
+	}
 
 	/*Deal with rifts, they have to be included into one partition only, not several: */
Index: /issm/trunk/src/c/modules/ModelProcessorx/ModelProcessorx.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/ModelProcessorx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/ModelProcessorx.cpp	(revision 24686)
@@ -13,5 +13,5 @@
 #include "./ModelProcessorx.h"
 
-void ModelProcessorx(Elements** pelements, Nodes*** pnodes, Vertices** pvertices, Materials** pmaterials, Constraints*** pconstraints, Loads*** ploads, Parameters** pparameters,IoModel* iomodel,FILE* toolkitfile, char* rootpath,const int solution_enum,const int nummodels,const int* analysis_enum_list){
+void ModelProcessorx(Elements** pelements, Nodes*** pnodes, Vertices** pvertices, Materials** pmaterials, Constraints*** pconstraints, Loads*** ploads, Parameters** pparameters,Inputs2** pinputs2,IoModel* iomodel,FILE* toolkitfile, char* rootpath,const int solution_enum,const int nummodels,const int* analysis_enum_list){
 	_assert_(nummodels>0);
 
@@ -40,7 +40,13 @@
 	/*Create elements, vertices and materials, independent of analysis_enum: */
 	CreateElements(elements,iomodel,nummodels);
-	CreateMaterials(elements,materials,iomodel,nummodels);
 	CreateVertices(elements,vertices,iomodel,solution_enum);
 	CreateParameters(parameters,iomodel,rootpath,toolkitfile,solution_enum);
+
+	/*Should move to CreateInputs2*/
+	Inputs2 *inputs2 = new Inputs2(elements->Size(),vertices->Size());
+	if (iomodel->domaintype != Domain3DsurfaceEnum) iomodel->FetchDataToInput(inputs2,elements,"md.mesh.scale_factor",MeshScaleFactorEnum,1.);
+
+	/*Can now do Materials since we have created Inputs*/
+	CreateMaterials(elements,inputs2,materials,iomodel,nummodels);
 
 	/*Update datasets based on each analysis (and add nodes, constrains and loads)*/
@@ -54,5 +60,5 @@
 		analysis->UpdateParameters(parameters,iomodel,solution_enum,analysis_enum);
 		analysis->CreateNodes(nodes[i],iomodel);
-		analysis->UpdateElements(elements,iomodel,i,analysis_enum);
+		analysis->UpdateElements(elements,inputs2,iomodel,i,analysis_enum);
 		analysis->CreateConstraints(constraints[i],iomodel);
 		analysis->CreateLoads(loads[i],iomodel);
@@ -63,18 +69,21 @@
 		loads[i]->Presort();
 		nodes[i]->Presort();
+
+		/*Finalize loads (count pengrids,penpairs,rifts,etc)*/
+		loads[i]->Finalize();
 	}
 
 	/*Solution specific updates*/
 	if(VerboseMProcessor()) _printf0_("   updating elements and materials for control parameters" << "\n");
-	UpdateElementsAndMaterialsControl(elements,parameters,materials,iomodel);
+	UpdateElementsAndMaterialsControl(elements,parameters,inputs2,materials,iomodel);
 	#ifdef _HAVE_DAKOTA_
 	if(VerboseMProcessor()) _printf0_("   updating elements and materials for uncertainty quantification" << "\n");
-	UpdateElementsAndMaterialsDakota(elements,materials,iomodel);
+	UpdateElementsAndMaterialsDakota(elements,inputs2,materials,iomodel);
 	#endif
-	if(solution_enum==TransientSolutionEnum) UpdateElementsTransient(elements,parameters,iomodel);
+	if(solution_enum==TransientSolutionEnum) UpdateElementsTransient(elements,parameters,inputs2,iomodel);
 
 	/*Output definitions dataset: */
 	if(VerboseMProcessor()) _printf0_("   creating output definitions" << "\n");
-	CreateOutputDefinitions(elements,parameters,iomodel);
+	CreateOutputDefinitions(elements,parameters,inputs2,iomodel);
 
 	/* Sort datasets:
@@ -94,4 +103,5 @@
 	*ploads       = loads;
 	*pparameters  = parameters;
+	*pinputs2     = inputs2;
 
 	if(VerboseMProcessor()) _printf0_("   done with model processor \n");
Index: /issm/trunk/src/c/modules/ModelProcessorx/ModelProcessorx.h
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/ModelProcessorx.h	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/ModelProcessorx.h	(revision 24686)
@@ -9,9 +9,9 @@
 #include "../../analyses/analyses.h"
 
-void ModelProcessorx(Elements** pelements, Nodes*** pnodes, Vertices** pvertices, Materials** pmaterials, Constraints*** pconstraints, Loads*** ploads, Parameters** pparameters,IoModel* iomodel,FILE* toolkitfile, char* rootpath,const int solution_type,const int nummodels,const int* analysis_type_listh);
+void ModelProcessorx(Elements** pelements, Nodes*** pnodes, Vertices** pvertices, Materials** pmaterials, Constraints*** pconstraints, Loads*** ploads, Parameters** pparameters,Inputs2** pinputs2,IoModel* iomodel,FILE* toolkitfile, char* rootpath,const int solution_type,const int nummodels,const int* analysis_type_listh);
 
 /*Creation of fem datasets: general drivers*/
 void CreateElements(Elements* elements,IoModel* iomodel,int nummodels);
-void CreateMaterials(Elements* elements,Materials* materials,IoModel* iomodel,int nummodels);
+void CreateMaterials(Elements* elements,Inputs2* inputs2,Materials* materials,IoModel* iomodel,int nummodels);
 void CreateVertices(Elements* elements,Vertices* vertices,IoModel* iomodel,int solution_type,bool isamr=false);
 void CreateParameters(Parameters*parameters,IoModel* iomodel,char* rootpath,FILE* toolkitfile,const int solution_type);
@@ -19,9 +19,9 @@
 void CreateParametersControl(Parameters* parameters,IoModel* iomodel,int solution_type);
 void CreateParametersDakota(Parameters* parameters,IoModel* iomodel,char* rootpath);
-void CreateOutputDefinitions(Elements* elements, Parameters* parameters,IoModel* iomodel);
-void UpdateElementsAndMaterialsControl(Elements* elements,Parameters* parameters,Materials* materials, IoModel* iomodel);
-void UpdateElementsAndMaterialsControlAD(Elements* elements,Parameters* parameters,Materials* materials, IoModel* iomodel);
-void UpdateElementsAndMaterialsDakota(Elements* elements,Materials* materials, IoModel* iomodel);
-void UpdateElementsTransient(Elements* elements,Parameters* parameters,IoModel* iomodel);
+void CreateOutputDefinitions(Elements* elements, Parameters* parameters,Inputs2* inputs2,IoModel* iomodel);
+void UpdateElementsAndMaterialsControl(Elements* elements,Parameters* parameters,Inputs2* inputs2,Materials* materials, IoModel* iomodel);
+void UpdateElementsAndMaterialsControlAD(Elements* elements,Parameters* parameters,Inputs2* inputs2,Materials* materials, IoModel* iomodel);
+void UpdateElementsAndMaterialsDakota(Elements* elements,Inputs2* inputs2,Materials* materials, IoModel* iomodel);
+void UpdateElementsTransient(Elements* elements,Parameters* parameters,Inputs2* inputs2,IoModel* iomodel);
 void CreateNodes(Nodes*nodes, IoModel* iomodel,int analysis,int finite_element,bool isamr=false,int approximation=NoneApproximationEnum,int* approximations=NULL);
 
Index: /issm/trunk/src/c/modules/ModelProcessorx/Transient/UpdateElementsTransient.cpp
===================================================================
--- /issm/trunk/src/c/modules/ModelProcessorx/Transient/UpdateElementsTransient.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ModelProcessorx/Transient/UpdateElementsTransient.cpp	(revision 24686)
@@ -9,5 +9,5 @@
 #include "../ModelProcessorx.h"
 
-void	UpdateElementsTransient(Elements* elements, Parameters* parameters,IoModel* iomodel){
+void	UpdateElementsTransient(Elements* elements, Parameters* parameters,Inputs2* inputs2,IoModel* iomodel){
 
 	/*FIXME: this should go into parameterization update*/
@@ -17,5 +17,5 @@
 
 	if(isgroundingline){
-		iomodel->FetchDataToInput(elements,"md.geometry.bed",BedEnum);
+		iomodel->FetchDataToInput(inputs2,elements,"md.geometry.bed",BedEnum);
 	}
 }
Index: /issm/trunk/src/c/modules/OutputResultsx/OutputResultsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/OutputResultsx/OutputResultsx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/OutputResultsx/OutputResultsx.cpp	(revision 24686)
@@ -20,19 +20,20 @@
 	FILE       *fid                     = NULL;
 	char       *outputfilename          = NULL;
-	char        cpu_outputfilename[100];        //easier to convert an integer with sprintf
+	char        outputfilename2[100];        //easier to convert an integer with sprintf
 	bool        io_gather;
 	int         solutiontype;
 	char*       solutiontypestring      = NULL;
-	bool        dakota_analysis         = false;
 
-	/*retrieve parameters: */
-	femmodel->parameters->FindParam(&dakota_analysis,QmuIsdakotaEnum);
 
 	/*recover my_rank:*/
 	my_rank=IssmComm::GetRank();
 
+	/*If we are running dakota, do we want to output?*/
+	bool dakota_analysis;
+	femmodel->parameters->FindParam(&dakota_analysis,QmuIsdakotaEnum);
 	if(dakota_analysis){
-		//no need to output anything, Dakota analysis has different outputs
-		return; 
+		bool dakota_output;
+		femmodel->parameters->FindParam(&dakota_output,QmuOutputEnum);
+		if(!dakota_output) return; 
 	}
 
@@ -57,10 +58,20 @@
 	if(io_gather){
 		/*Just open the file for output on cpu 0. We are gathering the data on cpu 0 from all other cpus: */
-		if(my_rank==0) fid=pfopen0(outputfilename ,"ab+");
+		if(!dakota_analysis){
+			if(my_rank==0) fid=pfopen0(outputfilename ,"ab+");
+		}
+		else{
+			if(my_rank==0){
+				int currEvalId ;
+				femmodel->parameters->FindParam(&currEvalId,QmuCurrEvalIdEnum);
+				sprintf(outputfilename2,"%s.%i",outputfilename,currEvalId);
+				fid=pfopen0(outputfilename2,"ab+");
+			}
+		}
 	}
 	else{
 		/*We are opening different  files for output on all cpus. Append the  rank to the filename, and open: */
-		sprintf(cpu_outputfilename,"%s.%i",outputfilename,my_rank);
-		fid=pfopen(cpu_outputfilename ,"ab+");
+		sprintf(outputfilename2,"%s.%i",outputfilename,my_rank);
+		fid=pfopen(outputfilename2 ,"ab+");
 	}
 
@@ -83,7 +94,12 @@
 	/*Close output file? :*/
 	if(io_gather){
-		if(my_rank==0) pfclose(fid,outputfilename);
+		if(!dakota_analysis){
+			if(my_rank==0) pfclose(fid,outputfilename);
+		}
+		else{
+			if(my_rank==0) pfclose(fid,outputfilename2);
+		}
 	}
-	else pfclose(fid,cpu_outputfilename);
+	else pfclose(fid,outputfilename2);
 #endif
 
Index: /issm/trunk/src/c/modules/ResetConstraintsx/ResetConstraintsx.cpp
===================================================================
--- /issm/trunk/src/c/modules/ResetConstraintsx/ResetConstraintsx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ResetConstraintsx/ResetConstraintsx.cpp	(revision 24686)
@@ -24,5 +24,5 @@
 
 	/*Deal with rift first*/
-	if(RiftIsPresent(femmodel->loads,analysis_type)){
+	if(femmodel->loads->numrifts){
 		_error_("rift constraints reset not supported yet!");
 	}
@@ -43,5 +43,5 @@
 
 	/*Deal with rift first*/
-	if(RiftIsPresent(femmodel->loads,femmodel->analysis_type_list[femmodel->analysis_counter])){
+	if(femmodel->loads->numrifts){
 		_error_("rift constraints reset not supported yet!");
 	}
Index: /issm/trunk/src/c/modules/RheologyBAbsGradientx/RheologyBAbsGradientx.cpp
===================================================================
--- /issm/trunk/src/c/modules/RheologyBAbsGradientx/RheologyBAbsGradientx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/RheologyBAbsGradientx/RheologyBAbsGradientx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void RheologyBAbsGradientx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -53,6 +54,6 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input=element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* rheologyb_input=element->GetInput(MaterialsRheologyBEnum);                  _assert_(rheologyb_input);
+	DatasetInput2* weights_input=element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* rheologyb_input=element->GetInput2(MaterialsRheologyBEnum);                  _assert_(rheologyb_input);
 
 	/* Start  looping on the number of gaussian points: */
@@ -124,7 +125,7 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input=element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* rheologyb_input=element->GetInput(MaterialsRheologyBbarEnum);            _assert_(rheologyb_input);
-	Input* rheologyb0_input=element->GetInput(RheologyBInitialguessEnum);           _assert_(rheologyb0_input);
+	DatasetInput2* weights_input=element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* rheologyb_input=element->GetInput2(MaterialsRheologyBbarEnum);            _assert_(rheologyb_input);
+	Input2* rheologyb0_input=element->GetInput2(RheologyBInitialguessEnum);           _assert_(rheologyb0_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/modules/RheologyBbarAbsGradientx/RheologyBbarAbsGradientx.cpp
===================================================================
--- /issm/trunk/src/c/modules/RheologyBbarAbsGradientx/RheologyBbarAbsGradientx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/RheologyBbarAbsGradientx/RheologyBbarAbsGradientx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void RheologyBbarAbsGradientx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -59,6 +60,6 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input=basalelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* rheologyb_input=basalelement->GetInput(MaterialsRheologyBbarEnum);                  _assert_(rheologyb_input);
+	DatasetInput2* weights_input=basalelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* rheologyb_input=basalelement->GetInput2(MaterialsRheologyBbarEnum);                  _assert_(rheologyb_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/modules/Scotchx/Scotchx.h
===================================================================
--- /issm/trunk/src/c/modules/Scotchx/Scotchx.h	(revision 24685)
+++ /issm/trunk/src/c/modules/Scotchx/Scotchx.h	(revision 24686)
@@ -1,10 +1,10 @@
 /*!\file:  Scotchxx.h
  * \brief header file for Scotch partitioner
- */ 
+ */
 
 #ifndef _SCOTCHX_H
 #define _SCOTCHX_H
 
-#undef __FUNCT__ 
+#undef __FUNCT__
 #define __FUNCT__  "Scotchx"
 
@@ -23,8 +23,15 @@
 	#define GMAP
 
-	#include "scotch_module.h"
-	#include "scotch_common.h"
-	#include "scotch.h"
-	#include "scotch_gmap.h"
+	#ifdef _PETSC_SCOTCH_
+		#include "scotch_module.h"
+		#include "scotch_common.h"
+		#include "scotch_gmap.h"
+	#endif
+
+	#ifdef _HAVE_MPI_
+		#include "ptscotch.h"
+	#else
+		#include "scotch.h"
+	#endif
 
 	/*
Index: /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SetActiveNodesLSMx/SetActiveNodesLSMx.cpp	(revision 24686)
@@ -27,5 +27,5 @@
 			case TaylorHoodEnum:case XTaylorHoodEnum:case LATaylorHoodEnum:
 			case CrouzeixRaviartEnum:case LACrouzeixRaviartEnum:case OneLayerP4zEnum:{
-				Input* input=element->GetInput(IceMaskNodeActivationEnum);
+				Input2* input=element->GetInput2(IceMaskNodeActivationEnum);
 				if(!input) _error_("Input " << EnumToStringx(IceMaskNodeActivationEnum) << " not found in element");
 
Index: /issm/trunk/src/c/modules/Solverx/Solverx.cpp
===================================================================
--- /issm/trunk/src/c/modules/Solverx/Solverx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/Solverx/Solverx.cpp	(revision 24686)
@@ -25,5 +25,5 @@
 	if(!checkconvergence(Kff,pf,uf,parameters)){
 
-		_printf0_("   WARNING: Solver failed, Trying Recovery Mode\n");
+		_printf0_("WARNING: Solver failed, Trying Recovery Mode\n");
 		ToolkitsOptionsFromAnalysis(parameters,RecoveryAnalysisEnum);
 		delete uf;
@@ -65,5 +65,5 @@
 	/*Check convergence*/
 	if(solver_residue>solver_residue_threshold){
-		_printf0_("   solver residue too high!: norm(KU-F)/norm(F)=" << solver_residue << " => Trying recovery solver\n");
+		_printf0_("solver residue too high!: norm(KU-F)/norm(F)=" << solver_residue << " > "<<solver_residue_threshold<<" (md.settings.solver_residue_threshold)\n");
 		return false;
 	}
Index: /issm/trunk/src/c/modules/SurfaceAbsVelMisfitx/SurfaceAbsVelMisfitx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceAbsVelMisfitx/SurfaceAbsVelMisfitx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SurfaceAbsVelMisfitx/SurfaceAbsVelMisfitx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void SurfaceAbsVelMisfitx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -59,12 +60,12 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input=topelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input     =topelement->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vxobs_input  =topelement->GetInput(InversionVxObsEnum);                     _assert_(vxobs_input);
-	Input* vy_input     = NULL;
-	Input* vyobs_input  = NULL;
+	DatasetInput2* weights_input=topelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input     =topelement->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vxobs_input  =topelement->GetInput2(InversionVxObsEnum);                     _assert_(vxobs_input);
+	Input2* vy_input     = NULL;
+	Input2* vyobs_input  = NULL;
 	if(numcomponents==2){
-		vy_input    =topelement->GetInput(VyEnum);              _assert_(vy_input);
-		vyobs_input =topelement->GetInput(InversionVyObsEnum);  _assert_(vyobs_input);
+		vy_input    =topelement->GetInput2(VyEnum);              _assert_(vy_input);
+		vyobs_input =topelement->GetInput2(InversionVyObsEnum);  _assert_(vyobs_input);
 	}
 
Index: /issm/trunk/src/c/modules/SurfaceAverageVelMisfitx/SurfaceAverageVelMisfitx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceAverageVelMisfitx/SurfaceAverageVelMisfitx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SurfaceAverageVelMisfitx/SurfaceAverageVelMisfitx.cpp	(revision 24686)
@@ -8,4 +8,5 @@
 #include "../../toolkits/toolkits.h"
 #include "../SurfaceAreax/SurfaceAreax.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void SurfaceAverageVelMisfitx(IssmDouble* pJ,FemModel* femmodel){
@@ -67,12 +68,12 @@
 	/*Retrieve all inputs we will be needing: */
 	topelement->GetInputValue(&S,SurfaceAreaEnum);
-	Input* weights_input=topelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input     =topelement->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vxobs_input  =topelement->GetInput(InversionVxObsEnum);                     _assert_(vxobs_input);
-	Input* vy_input     = NULL;
-	Input* vyobs_input  = NULL;
+	DatasetInput2* weights_input=topelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input     =topelement->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vxobs_input  =topelement->GetInput2(InversionVxObsEnum);                     _assert_(vxobs_input);
+	Input2* vy_input     = NULL;
+	Input2* vyobs_input  = NULL;
 	if(numcomponents==2){
-		vy_input    =topelement->GetInput(VyEnum);              _assert_(vy_input);
-		vyobs_input =topelement->GetInput(InversionVyObsEnum);  _assert_(vyobs_input);
+		vy_input    =topelement->GetInput2(VyEnum);              _assert_(vy_input);
+		vyobs_input =topelement->GetInput2(InversionVyObsEnum);  _assert_(vyobs_input);
 	}
 
Index: /issm/trunk/src/c/modules/SurfaceLogVelMisfitx/SurfaceLogVelMisfitx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceLogVelMisfitx/SurfaceLogVelMisfitx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SurfaceLogVelMisfitx/SurfaceLogVelMisfitx.cpp	(revision 24686)
@@ -4,7 +4,7 @@
 
 #include "./SurfaceLogVelMisfitx.h"
-
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void SurfaceLogVelMisfitx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -62,12 +62,12 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input=topelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input     =topelement->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vxobs_input  =topelement->GetInput(InversionVxObsEnum);                     _assert_(vxobs_input);
-	Input* vy_input     = NULL;
-	Input* vyobs_input  = NULL;
+	DatasetInput2* weights_input=topelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input     =topelement->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vxobs_input  =topelement->GetInput2(InversionVxObsEnum);                     _assert_(vxobs_input);
+	Input2* vy_input     = NULL;
+	Input2* vyobs_input  = NULL;
 	if(numcomponents==2){
-		vy_input    =topelement->GetInput(VyEnum);              _assert_(vy_input);
-		vyobs_input =topelement->GetInput(InversionVyObsEnum);  _assert_(vyobs_input);
+		vy_input    =topelement->GetInput2(VyEnum);              _assert_(vy_input);
+		vyobs_input =topelement->GetInput2(InversionVyObsEnum);  _assert_(vyobs_input);
 	}
 
Index: /issm/trunk/src/c/modules/SurfaceLogVxVyMisfitx/SurfaceLogVxVyMisfitx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceLogVxVyMisfitx/SurfaceLogVxVyMisfitx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SurfaceLogVxVyMisfitx/SurfaceLogVxVyMisfitx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void SurfaceLogVxVyMisfitx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -61,12 +62,12 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input=topelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input     =topelement->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vxobs_input  =topelement->GetInput(InversionVxObsEnum);                     _assert_(vxobs_input);
-	Input* vy_input     = NULL;
-	Input* vyobs_input  = NULL;
+	DatasetInput2* weights_input=topelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input     =topelement->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vxobs_input  =topelement->GetInput2(InversionVxObsEnum);                     _assert_(vxobs_input);
+	Input2* vy_input     = NULL;
+	Input2* vyobs_input  = NULL;
 	if(numcomponents==2){
-		vy_input    =topelement->GetInput(VyEnum);              _assert_(vy_input);
-		vyobs_input =topelement->GetInput(InversionVyObsEnum);  _assert_(vyobs_input);
+		vy_input    =topelement->GetInput2(VyEnum);              _assert_(vy_input);
+		vyobs_input =topelement->GetInput2(InversionVyObsEnum);  _assert_(vyobs_input);
 	}
 
Index: /issm/trunk/src/c/modules/SurfaceMassBalancex/Gembx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceMassBalancex/Gembx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SurfaceMassBalancex/Gembx.cpp	(revision 24686)
@@ -6,4 +6,6 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../modules.h"
+#include "../../classes/Inputs2/TransientInput2.h"
 
 const double Pi = 3.141592653589793;
@@ -32,7 +34,49 @@
 void Gembx(FemModel* femmodel){  /*{{{*/
 
-	for(int i=0;i<femmodel->elements->Size();i++){
-        Element* element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
-        element->SmbGemb();
+	int        count=0;
+	IssmDouble time,dt,finaltime,starttime;
+	IssmDouble timeclim=0.0;
+	IssmDouble t,smb_dt;
+   IssmDouble delta;
+	bool       isclimatology=false;
+
+	femmodel->parameters->FindParam(&time,TimeEnum);                        /*transient core time at which we run the smb core*/
+   femmodel->parameters->FindParam(&dt,TimesteppingTimeStepEnum);          /*transient core time step*/
+	femmodel->parameters->FindParam(&finaltime,TimesteppingFinalTimeEnum);
+	femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
+   femmodel->parameters->FindParam(&smb_dt,SmbDtEnum);                     /*time period for the smb solution,  usually smaller than the glaciological dt*/
+	femmodel->parameters->FindParam(&isclimatology,SmbIsclimatologyEnum);
+
+	//before starting loop, realize that the transient core runs this smb_core at time = time +deltaT.
+	//go back to time - deltaT:
+	time-=dt;
+
+	IssmDouble timeinputs = time;
+
+	/*Start loop: */
+	count=1;
+	for (t=time;t<time+dt;t=t+smb_dt){
+
+		for(int i=0;i<femmodel->elements->Size();i++){
+			Element* element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
+
+			timeclim=time;
+			if (isclimatology){
+				//If this is a climatology, we need to repeat the forcing after the final time
+				TransientInput2* Ta_input_tr  = element->inputs2->GetTransientInput(SmbTaEnum);    _assert_(Ta_input_tr);
+
+				/*Get temperature climatology value*/
+				int offsetend = Ta_input_tr->GetTimeInputOffset(finaltime);
+				IssmDouble time0     = Ta_input_tr->GetTimeByOffset(-1);
+				IssmDouble timeend   = Ta_input_tr->GetTimeByOffset(offsetend);
+				if (time>time0 && timeend>time0){
+					delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+					timeclim=time0+delta;
+				}
+			}
+			timeinputs = t-time+timeclim;
+			element->SmbGemb(timeinputs,count);
+		}
+		count=count+1;
 	}
 
@@ -107,5 +151,5 @@
 	}
 
-	/*Free ressouces:*/
+	/*Free resources:*/
 	xDelete(dzT);
 	xDelete(dzB);
@@ -339,5 +383,5 @@
 	}
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	xDelete<IssmDouble>(gsz);
 	xDelete<IssmDouble>(dT);
@@ -494,5 +538,5 @@
 
 			// modification of albedo due to thin layer of snow or solid
-			// condensation (deposition) at the surface surface
+			// condensation (deposition) at the surface
 
 			// check if condensation occurs & if it is deposited in solid phase
@@ -505,5 +549,5 @@
 			// a_surf = a_wet - (a_wet - a_surf) * exp(-W_surf/W0);
 
-			/*Free ressources:*/
+			/*Free resources:*/
 			xDelete<IssmDouble>(t0);
 			xDelete<IssmDouble>(T);
@@ -650,5 +694,5 @@
 	// discretized heat equation:
 
-	//                 Tp = (Au*Tu° + Ad*Td° + (Ap-Au-Ad)Tp° + S) / Ap
+	//                 Tp = (Au*Tuo + Ad*Tdo + (Ap-Au-Ad)Tpo + S) / Ap
 
 	// where neighbor coefficients Au, Ap, & Ad are
@@ -659,6 +703,5 @@
 
 	// and u & d represent grid points up and down from the center grid point 
-	// p and // u & d represent grid points up and down from the center grid 
-	// point p and ° identifies previous time step values. S is a source term.
+	// point p and o identifies previous time step values. S is a source term.
 
 	// u, d, and p conductivities
@@ -873,5 +916,5 @@
 	}
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	xDelete<IssmDouble>(K);
 	xDelete<IssmDouble>(KU);
@@ -1020,5 +1063,5 @@
 			swf[0] = swf[0]+ swfS[2];
 
-			/*Free ressources: */
+			/*Free resources: */
 			xDelete<IssmDouble>(gsz);
 			xDelete<IssmDouble>(B1_cum);
@@ -1046,5 +1089,5 @@
 			// SWs and SWss coefficients need to be better constranted. Greuell
 			// and Konzelmann 1994 used SWs = 0.36 and SWss = 0.64 as this the
-			// the // of SW radiation with wavelengths > and < 800 nm
+			// % of SW radiation with wavelengths > and < 800 nm
 			// respectively.  This, however, may not account for the fact that
 			// the albedo of wavelengths > 800 nm has a much lower albedo.
@@ -1086,5 +1129,5 @@
 			swf[0] += swf_s;
 
-			/*Free ressources:*/
+			/*Free resources:*/
 			xDelete<IssmDouble>(B_cum);
 			xDelete<IssmDouble>(exp_B);
@@ -1267,5 +1310,5 @@
 
 	}
-	/*Free ressources:*/
+	/*Free resources:*/
 	if(mInit)xDelete<IssmDouble>(mInit);
 
@@ -1341,4 +1384,6 @@
 	/*outputs:*/
 	IssmDouble  mAdd = 0.0;
+	IssmDouble  surplusE = 0.0;
+	IssmDouble  surplusT = 0.0;
 	IssmDouble dz_add = 0.0;
 	IssmDouble  Rsum = 0.0;
@@ -1434,12 +1479,22 @@
 			int i = 0;
 			while (cellsum(surpE,n) > 0.0+Ttol && i<n){
-				// use surplus energy to increase the temperature of lower cell
-				T[i+1] = surpE[i]/m[i+1]/CI + T[i+1];
-
-				exsT[i+1] = max(0.0, T[i+1] - CtoK) + exsT[i+1];
-				T[i+1] = min(CtoK, T[i+1]);
-
-				surpT[i+1] = max(0.0, exsT[i+1] - LF/CI);
-				surpE[i+1] = surpT[i+1] * CI * m[i+1];
+
+				if (i<n-1){
+					// use surplus energy to increase the temperature of lower cell
+					T[i+1] = surpE[i]/m[i+1]/CI + T[i+1];
+
+					exsT[i+1] = max(0.0, T[i+1] - CtoK) + exsT[i+1];
+					T[i+1] = min(CtoK, T[i+1]);
+
+					surpT[i+1] = max(0.0, exsT[i+1] - LF/CI);
+					surpE[i+1] = surpT[i+1] * CI * m[i+1];
+				}
+				else{
+					surplusT=max(0.0, exsT[i] - LF/CI);
+					surplusE=surpE[i];
+					if(VerboseSmb() && sid==0 && IssmComm::GetRank()==0){
+						_printf0_(" WARNING: surplus energy at the base of GEMB column\n");
+					}
+				}
 
 				// adjust current cell properties (again 159.1342 is the max T)
@@ -1591,5 +1646,5 @@
 		for(int i=0;i<n;i++)dz[i] = m[i] / d[i];
 
-		/*Free ressources:*/
+		/*Free resources:*/
 		xDelete<IssmDouble>(F);
 		xDelete<IssmDouble>(R);
@@ -1650,5 +1705,5 @@
 			m[i+1] = m_new;                             // combine top masses
 
-			// set cell to 99999 for deletion
+			// set cell to -99999 for deletion
 			m[i] = Delflag;
 		}
@@ -1657,5 +1712,5 @@
 	//If last cell has to be merged
 	if(lastCellFlag){
-         //find closest cell to merge with
+      //find closest cell to merge with
 		for(int i=n-2;i>=0;i--){
 			if(m[i]!=Delflag){
@@ -1680,5 +1735,5 @@
 		m[X1] = m_new;                             // combine top masses
 
-		// set cell to 99999 for deletion
+		// set cell to -99999 for deletion
 		m[X2] = Delflag;
 	}
@@ -1705,5 +1760,5 @@
 	// check if any of the top 10 cell depths are too large
 	X=0;
-	for(int i=9;i>=0;i--){
+	for(int i=min(9,n-1);i>=0;i--){
 		if(dz[i]> 2.0*dzMin+Dtol){
 			X=i;
@@ -1716,19 +1771,19 @@
 		if (dz[j] > dzMin*2.0+Dtol){
 
-				// _printf_("dz > dzMin * 2");
-				// split in two
-				cellsplit(&dz, n, j,.5);
-				cellsplit(&W, n, j,.5);
-				cellsplit(&m, n, j,.5);
-				cellsplit(&T, n, j,1.0);
-				cellsplit(&d, n, j,1.0);
-				cellsplit(&a, n, j,1.0);
-				cellsplit(&EI, n, j,.5);
-				cellsplit(&EW, n, j,.5);
-				cellsplit(&re, n, j,1.0);
-				cellsplit(&gdn, n, j,1.0);
-				cellsplit(&gsp, n, j,1.0);
-				n++;
-				X=X+1;
+			// _printf_("dz > dzMin * 2");
+			// split in two
+			cellsplit(&dz, n, j,.5);
+			cellsplit(&W, n, j,.5);
+			cellsplit(&m, n, j,.5);
+			cellsplit(&T, n, j,1.0);
+			cellsplit(&d, n, j,1.0);
+			cellsplit(&a, n, j,1.0);
+			cellsplit(&EI, n, j,.5);
+			cellsplit(&EW, n, j,.5);
+			cellsplit(&re, n, j,1.0);
+			cellsplit(&gdn, n, j,1.0);
+			cellsplit(&gsp, n, j,1.0);
+			n++;
+			X=X+1;
 		}
 		else j++;
@@ -1818,10 +1873,10 @@
 	#ifndef _HAVE_AD_
 	dm = round((mSum0 - mSum1 + mAdd)*100.0)/100.0;
-	dE = round(sumE0 - sumE1 - sumER +  addE);
+	dE = round(sumE0 - sumE1 - sumER +  addE - surplusE);
 	if (dm !=0  || dE !=0) _error_("mass or energy are not conserved in melt equations\n"
 			<< "dm: " << dm << " dE: " << dE << "\n");
 	#endif
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	if(m)xDelete<IssmDouble>(m);
 	if(EI)xDelete<IssmDouble>(EI);
@@ -1979,6 +2034,12 @@
 				c0arth = 0.07 * H;
 				c1arth = 0.03 * H;
+				//ERA-5
+				//M0 = max(2.3999 - (0.2610 * log(C)),0.25);
+				//M1 = max(2.7469 - (0.3228 * log(C)),0.25);
+				//RACMO
 				M0 = max(1.6599 - (0.1724 * log(C)),0.25);
 				M1 = max(2.0102 - (0.2458 * log(C)),0.25);
+				//From Ligtenberg
+				//H = exp((-60000.0/(Tmean * R)) + (42400.0/(Tmean * R))) * (C * 9.81);
 				//M0 = max(1.435 - (0.151 * log(C)),0.25);
 				//M1 = max(2.366 - (0.293 * log(C)),0.25);
@@ -1993,8 +2054,13 @@
 				c0arth = 0.07 * H;
 				c1arth = 0.03 * H;
+				// ERA5
+				//M0 = max(1.8920 - (0.1569 * log(C)),0.25);
+				//M1 = max(2.5662 - (0.2274 * log(C)),0.25);
+				// RACMO
+				M0 = max(1.6201 - (0.1450 * log(C)),0.25);
+				M1 = max(2.5577 - (0.2899 * log(C)),0.25);
+				// From Kuipers Munneke
 				//M0 = max(1.042 - (0.0916 * log(C)),0.25);
 				//M1 = max(1.734 - (0.2039 * log(C)),0.25);
-				M0 = max(1.6201 - (0.1450 * log(C)),0.25);
-				M1 = max(2.5577 - (0.2899 * log(C)),0.25);
 				c0 = M0*c0arth;
 				c1 = M1*c1arth;
@@ -2015,5 +2081,5 @@
 	}
 
-	/*Free ressources:*/
+	/*Free resources:*/
 	xDelete<IssmDouble>(mass_init);
 	xDelete<IssmDouble>(cumdz);
@@ -2085,7 +2151,7 @@
 
 	// calculate the Bulk Richardson Number (Ri)
-	Ri = (2.0*9.81* (Vz - z0) * (Ta - Ts)) / ((Ta + Ts)* pow(V,2));
-
-	// calculate Monin-Obukhov stability factors 'coef_M' and 'coef_H'
+	Ri = (2.0*9.81* (Vz - z0) * (Ta - Ts)) / ((Ta + Ts)* pow(V,2.0));
+
+	// calculate Monin-Obukhov stability factors 'coefM' and 'coefH'
 
 	// do not allow Ri to exceed 0.19
Index: /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SurfaceMassBalancex/SurfaceMassBalancex.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../toolkits/toolkits.h"
 #include "../modules.h"
+#include "../../classes/Inputs2/TransientInput2.h"
 
 void SmbForcingx(FemModel* femmodel){/*{{{*/
@@ -13,13 +14,11 @@
 	//    INPUT parameters: ni: working size of arrays
 	//    OUTPUT: mass-balance (m/yr ice): agd(NA)
-	bool isclimatology=false;
+	bool isclimatology;
 	femmodel->parameters->FindParam(&isclimatology,SmbIsclimatologyEnum);
 
 	if (isclimatology){
-		int v;
-		IssmDouble time,dt,delta,starttime,finaltime;
-		int offsetend=-1;
-		IssmDouble time0, timeend, timeclim;
-
+
+		/*Get time parameters*/
+		IssmDouble time,dt,starttime,finaltime;
 		femmodel->parameters->FindParam(&time,TimeEnum); 
 		femmodel->parameters->FindParam(&dt,TimesteppingTimeStepEnum);
@@ -27,6 +26,29 @@
 		femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
 		
-		if (time<=starttime+dt){
+		if(time<=starttime+dt){
+			/*FIXME: this is wrong, should be done at the ElementUpdate step of analysis, not here!*/
 			InputDuplicatex(femmodel,SmbMassBalanceEnum,SmbMassBalanceClimateEnum);
+			femmodel->inputs2->DeleteInput(SmbMassBalanceEnum);
+		}
+
+		/*If this is a climatology, we need to repeat the forcing after the final time*/
+		TransientInput2* smb_input=femmodel->inputs2->GetTransientInput(SmbMassBalanceClimateEnum); _assert_(smb_input);
+
+		/*Get accumulation climatology value*/
+		int offsetend = smb_input->GetTimeInputOffset(finaltime);
+		IssmDouble time0     = smb_input->GetTimeByOffset(-1);
+		IssmDouble timeend   = smb_input->GetTimeByOffset(offsetend);
+
+		_assert_(timeend>time0);
+		IssmDouble timeclim  = time;
+
+		if(time>time0 && timeend>time0){
+			IssmDouble delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+			if(delta==0.){
+				timeclim=timeend;
+			}
+			else{
+				timeclim=time0+delta;
+			}
 		}
 
@@ -37,24 +59,8 @@
 			int         numvertices = element->GetNumberOfVertices();
 			IssmDouble* smb         = xNew<IssmDouble>(numvertices);
-
-			/*Recover Smb*/
-			//If this is a climatology, we need to repeat the forcing after the final time
-			Input* smb_input=element->GetInput(SmbMassBalanceClimateEnum); _assert_(smb_input);
-
-			//Get accumulation climatology value
-			offsetend=dynamic_cast<TransientInput*>(smb_input)->GetTimeInputOffset(finaltime);
-			time0=dynamic_cast<TransientInput*>(smb_input)->GetTimeByOffset(-1);
-			timeend=dynamic_cast<TransientInput*>(smb_input)->GetTimeByOffset(offsetend);
-			timeclim=time;
-			if (time>time0 & timeend>time0){
-				delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-				if (delta==0){ timeclim=timeend;}
-				else{ timeclim=time0+delta;}
-			}
-
 			element->GetInputListOnVerticesAtTime(smb,SmbMassBalanceClimateEnum,timeclim);
 
 			/*Add input to element and Free memory*/
-			element->AddInput(SmbMassBalanceEnum,smb,P1Enum);
+			element->AddInput2(SmbMassBalanceEnum,smb,P1Enum);
 			xDelete<IssmDouble>(smb);
 		}
@@ -115,5 +121,5 @@
 
 		/*Add input to element and Free memory*/
-		element->AddInput(SmbMassBalanceEnum,smb,P1Enum);
+		element->AddInput2(SmbMassBalanceEnum,smb,P1Enum);
 		xDelete<IssmDouble>(Href);
 		xDelete<IssmDouble>(Smbref);
@@ -179,5 +185,5 @@
 
 		/*Add input to element and Free memory*/
-		element->AddInput(SmbMassBalanceEnum,smb,P1Enum);
+		element->AddInput2(SmbMassBalanceEnum,smb,P1Enum);
 		xDelete<IssmDouble>(ela);
 		xDelete<IssmDouble>(b_pos);
@@ -389,5 +395,5 @@
 
 		/*Add input to element and Free memory*/
-		element->AddInput(SmbMassBalanceEnum,smblist,P1Enum);
+		element->AddInput2(SmbMassBalanceEnum,smblist,P1Enum);
 		xDelete<IssmDouble>(surfacelist);
 		xDelete<IssmDouble>(smblistref);
@@ -404,14 +410,6 @@
 	//    surface runoff (m/yr water equivalent): runoff
 	//    OUTPUT: mass-balance (m/yr ice): agd(NA)
-	int v;
-	bool isclimatology=false;
-	IssmDouble time,delta,starttime,finaltime;
-	int offsetend=-1;
-	IssmDouble time0, timeend, timeclim;
-
+	bool isclimatology;
 	femmodel->parameters->FindParam(&isclimatology,SmbIsclimatologyEnum);
-	femmodel->parameters->FindParam(&time,TimeEnum); 
-	femmodel->parameters->FindParam(&finaltime,TimesteppingFinalTimeEnum);
-	femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
 
 	/*Loop over all the elements of this partition*/
@@ -427,48 +425,60 @@
 
 		/*Recover Smb Components*/
-		if (isclimatology){
+		if(isclimatology){
+
+			int offsetend;
+			IssmDouble time0,timeend,timeclim;
+			IssmDouble time,starttime,finaltime;
+
+			/*Get time parameters*/
+			femmodel->parameters->FindParam(&time,TimeEnum); 
+			femmodel->parameters->FindParam(&finaltime,TimesteppingFinalTimeEnum);
+			femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
 
 			//If this is a climatology, we need to repeat the forcing after the final time
-			Input* acc_input=element->GetInput(SmbAccumulationEnum); _assert_(acc_input);
-			Input* evap_input=element->GetInput(SmbEvaporationEnum); _assert_(evap_input);
-			Input* runoff_input=element->GetInput(SmbRunoffEnum); _assert_(runoff_input);
+			TransientInput2* acc_input    = element->inputs2->GetTransientInput(SmbAccumulationEnum); _assert_(acc_input);
+			TransientInput2* evap_input   = element->inputs2->GetTransientInput(SmbEvaporationEnum);  _assert_(evap_input);
+			TransientInput2* runoff_input = element->inputs2->GetTransientInput(SmbRunoffEnum);       _assert_(runoff_input);
 
 			//Get accumulation climatology value
-			offsetend=dynamic_cast<TransientInput*>(acc_input)->GetTimeInputOffset(finaltime);
-			time0=dynamic_cast<TransientInput*>(acc_input)->GetTimeByOffset(-1);
-			timeend=dynamic_cast<TransientInput*>(acc_input)->GetTimeByOffset(offsetend);
-			timeclim=time;
-			if (time>time0 & timeend>time0){
-				delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-				if (delta==0){ timeclim=timeend;}
-				else{ timeclim=time0+delta;}
-			}
-
+			offsetend = acc_input->GetTimeInputOffset(finaltime);
+			time0     = acc_input->GetTimeByOffset(-1);
+			timeend   = acc_input->GetTimeByOffset(offsetend);
+			timeclim  = time;
+			if(time>time0 & timeend>time0){
+				IssmDouble delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+				if(delta==0.)
+				 timeclim=timeend;
+				else
+				 timeclim=time0+delta;
+			}
 			element->GetInputListOnVerticesAtTime(acc,SmbAccumulationEnum,timeclim);
 
 			//Get evaporation climatology value
-			offsetend=dynamic_cast<TransientInput*>(evap_input)->GetTimeInputOffset(finaltime);
-			time0=dynamic_cast<TransientInput*>(evap_input)->GetTimeByOffset(-1);
-			timeend=dynamic_cast<TransientInput*>(evap_input)->GetTimeByOffset(offsetend);
-			timeclim=time;
-			if (time>time0 & timeend>time0){
-				delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-				if (delta==0){ timeclim=timeend;}
-				else{ timeclim=time0+delta;}
-			}
-
+			offsetend = evap_input->GetTimeInputOffset(finaltime);
+			time0     = evap_input->GetTimeByOffset(-1);
+			timeend   = evap_input->GetTimeByOffset(offsetend);
+			timeclim  = time;
+			if(time>time0 & timeend>time0){
+				IssmDouble delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+				if(delta==0.)
+				 timeclim=timeend;
+				else
+				 timeclim=time0+delta;
+			}
 			element->GetInputListOnVerticesAtTime(evap,SmbEvaporationEnum,timeclim);
 
 			//Get runoff climatology value
-			offsetend=dynamic_cast<TransientInput*>(runoff_input)->GetTimeInputOffset(finaltime);
-			time0=dynamic_cast<TransientInput*>(runoff_input)->GetTimeByOffset(-1);
-			timeend=dynamic_cast<TransientInput*>(runoff_input)->GetTimeByOffset(offsetend);
-			timeclim=time;
-			if (time>time0 & timeend>time0){
-				delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-				if (delta==0){ timeclim=timeend;}
-				else{ timeclim=time0+delta;}
-			}
-
+			offsetend = runoff_input->GetTimeInputOffset(finaltime);
+			time0     = runoff_input->GetTimeByOffset(-1);
+			timeend   = runoff_input->GetTimeByOffset(offsetend);
+			timeclim  = time;
+			if(time>time0 & timeend>time0){
+				IssmDouble delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+				if(delta==0.)
+				 timeclim=timeend;
+				else
+				 timeclim=time0+delta;
+			}
 			element->GetInputListOnVerticesAtTime(runoff,SmbRunoffEnum,timeclim);
 		}
@@ -480,10 +490,8 @@
 
 		// loop over all vertices
-		for(v=0;v<numvertices;v++){
-			smb[v]=acc[v]-evap[v]-runoff[v];
-		}  //end of the loop over the vertices
+		for(int v=0;v<numvertices;v++) smb[v]=acc[v]-evap[v]-runoff[v];
 
 		/*Add input to element and Free memory*/
-		element->AddInput(SmbMassBalanceEnum,smb,P1Enum);
+		element->AddInput2(SmbMassBalanceEnum,smb,P1Enum);
 		xDelete<IssmDouble>(acc);
 		xDelete<IssmDouble>(evap);
@@ -502,14 +510,6 @@
 	//    refreeze of surface melt (m/yr water equivalent): refreeze
 	//    OUTPUT: mass-balance (m/yr ice): agd(NA)
-	int v;
-	bool isclimatology=false;
-	IssmDouble time,delta,starttime,finaltime;
-	int offsetend=-1;
-	IssmDouble time0, timeend, timeclim;
-
+	bool isclimatology;
 	femmodel->parameters->FindParam(&isclimatology,SmbIsclimatologyEnum);
-	femmodel->parameters->FindParam(&time,TimeEnum);
-	femmodel->parameters->FindParam(&finaltime,TimesteppingFinalTimeEnum);
-	femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
 
 	/*Loop over all the elements of this partition*/
@@ -528,60 +528,72 @@
 		if (isclimatology){
 
+			int offsetend;
+			IssmDouble time0,timeend,timeclim;
+			IssmDouble time,starttime,finaltime;
+			femmodel->parameters->FindParam(&time,TimeEnum);
+			femmodel->parameters->FindParam(&finaltime,TimesteppingFinalTimeEnum);
+			femmodel->parameters->FindParam(&starttime,TimesteppingStartTimeEnum);
+
+
 			//If this is a climatology, we need to repeat the forcing after the final time
-			Input* acc_input=element->GetInput(SmbAccumulationEnum); _assert_(acc_input);
-			Input* evap_input=element->GetInput(SmbEvaporationEnum); _assert_(evap_input);
-			Input* melt_input=element->GetInput(SmbMeltEnum); _assert_(melt_input);
-			Input* refreeze_input=element->GetInput(SmbRefreezeEnum); _assert_(refreeze_input);
+			TransientInput2* acc_input      = element->inputs2->GetTransientInput(SmbAccumulationEnum); _assert_(acc_input);
+			TransientInput2* evap_input     = element->inputs2->GetTransientInput(SmbEvaporationEnum);  _assert_(evap_input);
+			TransientInput2* melt_input     = element->inputs2->GetTransientInput(SmbMeltEnum);         _assert_(melt_input);
+			TransientInput2* refreeze_input = element->inputs2->GetTransientInput(SmbRefreezeEnum);     _assert_(refreeze_input);
 
 			//Get accumulation climatology value
-			offsetend=dynamic_cast<TransientInput*>(acc_input)->GetTimeInputOffset(finaltime);
-			time0=dynamic_cast<TransientInput*>(acc_input)->GetTimeByOffset(-1);
-			timeend=dynamic_cast<TransientInput*>(acc_input)->GetTimeByOffset(offsetend);
-			timeclim=time;
-			if (time>time0 & timeend>time0){
-				delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-				if (delta==0){ timeclim=timeend;}
-				else{ timeclim=time0+delta;}
-			}
-
+			offsetend = acc_input->GetTimeInputOffset(finaltime);
+			time0     = acc_input->GetTimeByOffset(-1);
+			timeend   = acc_input->GetTimeByOffset(offsetend);
+			timeclim  = time;
+			if(time>time0 & timeend>time0){
+				IssmDouble delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+				if(delta==0.)
+				 timeclim=timeend;
+				else
+				 timeclim=time0+delta;
+			}
 			element->GetInputListOnVerticesAtTime(acc,SmbAccumulationEnum,timeclim);
 
 			//Get evaporation climatology value
-			offsetend=dynamic_cast<TransientInput*>(evap_input)->GetTimeInputOffset(finaltime);
-			time0=dynamic_cast<TransientInput*>(evap_input)->GetTimeByOffset(-1);
-			timeend=dynamic_cast<TransientInput*>(evap_input)->GetTimeByOffset(offsetend);
-			timeclim=time;
-			if (time>time0){
-				delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-				if (delta==0){ timeclim=timeend;}
-				else{ timeclim=time0+delta;}
-			}
-
+			offsetend = evap_input->GetTimeInputOffset(finaltime);
+			time0     = evap_input->GetTimeByOffset(-1);
+			timeend   = evap_input->GetTimeByOffset(offsetend);
+			timeclim  = time;
+			if(time>time0 & timeend>time0){
+				IssmDouble delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+				if(delta==0.)
+				 timeclim=timeend;
+				else
+				 timeclim=time0+delta;
+			}
 			element->GetInputListOnVerticesAtTime(evap,SmbEvaporationEnum,timeclim);
 
 			//Get melt climatology value
-			offsetend=dynamic_cast<TransientInput*>(melt_input)->GetTimeInputOffset(finaltime);
-			time0=dynamic_cast<TransientInput*>(melt_input)->GetTimeByOffset(-1);
-			timeend=dynamic_cast<TransientInput*>(melt_input)->GetTimeByOffset(offsetend);
-			timeclim=time;
-			if (time>time0){
-				delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-				if (delta==0){ timeclim=timeend;}
-				else{ timeclim=time0+delta;}
-			}
-
+			offsetend = melt_input->GetTimeInputOffset(finaltime);
+			time0     = melt_input->GetTimeByOffset(-1);
+			timeend   = melt_input->GetTimeByOffset(offsetend);
+			timeclim  = time;
+			if(time>time0 & timeend>time0){
+				IssmDouble delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+				if(delta==0.)
+				 timeclim=timeend;
+				else
+				 timeclim=time0+delta;
+			}
 			element->GetInputListOnVerticesAtTime(melt,SmbMeltEnum,timeclim);
 
 			//Get refreeze climatology value
-			offsetend=dynamic_cast<TransientInput*>(refreeze_input)->GetTimeInputOffset(finaltime);
-			time0=dynamic_cast<TransientInput*>(refreeze_input)->GetTimeByOffset(-1);
-			timeend=dynamic_cast<TransientInput*>(refreeze_input)->GetTimeByOffset(offsetend);
-			timeclim=time;
-			if (time>time0){
-				delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
-				if (delta==0){ timeclim=timeend;}
-				else{ timeclim=time0+delta;}
-			}
-
+			offsetend = refreeze_input->GetTimeInputOffset(finaltime);
+			time0     = refreeze_input->GetTimeByOffset(-1);
+			timeend   = refreeze_input->GetTimeByOffset(offsetend);
+			timeclim  = time;
+			if(time>time0 & timeend>time0){
+				IssmDouble delta=(time-time0) - (timeend-time0)*(reCast<int,IssmDouble>((time-time0)/(timeend-time0)));
+				if(delta==0.)
+				 timeclim=timeend;
+				else
+				 timeclim=time0+delta;
+			}
 			element->GetInputListOnVerticesAtTime(refreeze,SmbRefreezeEnum,timeclim);
 		}
@@ -594,10 +606,8 @@
 
 		// loop over all vertices
-		for(v=0;v<numvertices;v++){
-			smb[v]=acc[v]-evap[v]-melt[v]+refreeze[v];
-		}  //end of the loop over the vertices
+		for(int v=0;v<numvertices;v++) smb[v]=acc[v]-evap[v]-melt[v]+refreeze[v];
 
 		/*Add input to element and Free memory*/
-		element->AddInput(SmbMassBalanceEnum,smb,P1Enum);
+		element->AddInput2(SmbMassBalanceEnum,smb,P1Enum);
 		xDelete<IssmDouble>(acc);
 		xDelete<IssmDouble>(evap);
Index: /issm/trunk/src/c/modules/SurfaceRelVelMisfitx/SurfaceRelVelMisfitx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SurfaceRelVelMisfitx/SurfaceRelVelMisfitx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SurfaceRelVelMisfitx/SurfaceRelVelMisfitx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void SurfaceRelVelMisfitx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -61,12 +62,12 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input=topelement->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* vx_input     =topelement->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vxobs_input  =topelement->GetInput(InversionVxObsEnum);                     _assert_(vxobs_input);
-	Input* vy_input     = NULL;
-	Input* vyobs_input  = NULL;
+	DatasetInput2* weights_input=topelement->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* vx_input     =topelement->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vxobs_input  =topelement->GetInput2(InversionVxObsEnum);                     _assert_(vxobs_input);
+	Input2* vy_input     = NULL;
+	Input2* vyobs_input  = NULL;
 	if(numcomponents==2){
-		vy_input    =topelement->GetInput(VyEnum);              _assert_(vy_input);
-		vyobs_input =topelement->GetInput(InversionVyObsEnum);  _assert_(vyobs_input);
+		vy_input    =topelement->GetInput2(VyEnum);              _assert_(vy_input);
+		vyobs_input =topelement->GetInput2(InversionVyObsEnum);  _assert_(vyobs_input);
 	}
 
Index: /issm/trunk/src/c/modules/SystemMatricesx/SystemMatricesx.cpp
===================================================================
--- /issm/trunk/src/c/modules/SystemMatricesx/SystemMatricesx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/SystemMatricesx/SystemMatricesx.cpp	(revision 24686)
@@ -8,5 +8,5 @@
 #include "../AllocateSystemMatricesx/AllocateSystemMatricesx.h"
 
-void SystemMatricesx(Matrix<IssmDouble>** pKff, Matrix<IssmDouble>** pKfs, Vector<IssmDouble>** ppf, Vector<IssmDouble>** pdf, IssmDouble* pkmax,FemModel* femmodel){
+void SystemMatricesx(Matrix<IssmDouble>** pKff, Matrix<IssmDouble>** pKfs, Vector<IssmDouble>** ppf, Vector<IssmDouble>** pdf, IssmDouble* pkmax,FemModel* femmodel, bool isAllocated){
 
 	/*intermediary: */
@@ -43,4 +43,5 @@
 		for (i=0;i<femmodel->elements->Size();i++){
 			element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
+			if(!element->AnyFSet() && analysisenum!=StressbalanceAnalysisEnum) continue;
 			ElementMatrix* Ke = analysis->CreateKMatrix(element);
 			ElementVector* pe = analysis->CreatePVector(element);
@@ -63,5 +64,13 @@
 
 	/*Allocate stiffness matrices and load vector*/
-	AllocateSystemMatricesx(&Kff,&Kfs,&df,&pf,femmodel);
+	if(isAllocated) {
+		Kff  = *pKff;
+		Kfs  = *pKfs;
+		pf   = *ppf;
+		df   = *pdf;
+	}
+	else {
+		AllocateSystemMatricesx(&Kff,&Kfs,&df,&pf,femmodel);
+	}
 
 	/*Display size*/
@@ -76,4 +85,5 @@
 	for (i=0;i<femmodel->elements->Size();i++){
 		element=xDynamicCast<Element*>(femmodel->elements->GetObjectByOffset(i));
+		if(!element->AnyFSet() && analysisenum!=StressbalanceAnalysisEnum) continue;
 		ElementMatrix* Ke = analysis->CreateKMatrix(element);
 		ElementVector* pe = analysis->CreatePVector(element);
Index: /issm/trunk/src/c/modules/SystemMatricesx/SystemMatricesx.h
===================================================================
--- /issm/trunk/src/c/modules/SystemMatricesx/SystemMatricesx.h	(revision 24685)
+++ /issm/trunk/src/c/modules/SystemMatricesx/SystemMatricesx.h	(revision 24686)
@@ -9,5 +9,5 @@
 
 /* local prototypes: */
-void SystemMatricesx(Matrix<IssmDouble>** pKff, Matrix<IssmDouble>** pKfs, Vector<IssmDouble>** ppf, Vector<IssmDouble>** pdf, IssmDouble* pkmax,FemModel* femmodel);
+void SystemMatricesx(Matrix<IssmDouble>** pKff, Matrix<IssmDouble>** pKfs, Vector<IssmDouble>** ppf, Vector<IssmDouble>** pdf, IssmDouble* pkmax,FemModel* femmodel, bool isAllocated=false);
 
 #endif  /* _SYSTEMMATRICESX_H */
Index: /issm/trunk/src/c/modules/ThicknessAbsMisfitx/ThicknessAbsMisfitx.cpp
===================================================================
--- /issm/trunk/src/c/modules/ThicknessAbsMisfitx/ThicknessAbsMisfitx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ThicknessAbsMisfitx/ThicknessAbsMisfitx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void ThicknessAbsMisfitx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -43,7 +44,7 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input     =element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* thickness_input   =element->GetInput(ThicknessEnum);                          _assert_(thickness_input);
-	Input* thicknessobs_input=element->GetInput(InversionThicknessObsEnum);              _assert_(thicknessobs_input);
+	DatasetInput2* weights_input     =element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* thickness_input   =element->GetInput2(ThicknessEnum);                          _assert_(thickness_input);
+	Input2* thicknessobs_input=element->GetInput2(InversionThicknessObsEnum);              _assert_(thicknessobs_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/modules/ThicknessAcrossGradientx/ThicknessAcrossGradientx.cpp
===================================================================
--- /issm/trunk/src/c/modules/ThicknessAcrossGradientx/ThicknessAcrossGradientx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ThicknessAcrossGradientx/ThicknessAcrossGradientx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void ThicknessAcrossGradientx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -45,8 +46,8 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input   =element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* thickness_input =element->GetInput(ThicknessEnum);                          _assert_(thickness_input);
-	Input* vx_input        =element->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vy_input        =element->GetInput(VyEnum);                                 _assert_(vy_input);
+	DatasetInput2* weights_input   =element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* thickness_input =element->GetInput2(ThicknessEnum);                          _assert_(thickness_input);
+	Input2* vx_input        =element->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vy_input        =element->GetInput2(VyEnum);                                 _assert_(vy_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/modules/ThicknessAlongGradientx/ThicknessAlongGradientx.cpp
===================================================================
--- /issm/trunk/src/c/modules/ThicknessAlongGradientx/ThicknessAlongGradientx.cpp	(revision 24685)
+++ /issm/trunk/src/c/modules/ThicknessAlongGradientx/ThicknessAlongGradientx.cpp	(revision 24686)
@@ -7,4 +7,5 @@
 #include "../../shared/shared.h"
 #include "../../toolkits/toolkits.h"
+#include "../../classes/Inputs2/DatasetInput2.h"
 
 void ThicknessAlongGradientx( IssmDouble* pJ, Elements* elements,Nodes* nodes, Vertices* vertices, Loads* loads, Materials* materials,Parameters* parameters){
@@ -45,8 +46,8 @@
 
 	/*Retrieve all inputs we will be needing: */
-	Input* weights_input   =element->GetInput(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
-	Input* thickness_input =element->GetInput(ThicknessEnum);                          _assert_(thickness_input);
-	Input* vx_input        =element->GetInput(VxEnum);                                 _assert_(vx_input);
-	Input* vy_input        =element->GetInput(VyEnum);                                 _assert_(vy_input);
+	DatasetInput2* weights_input   =element->GetDatasetInput2(InversionCostFunctionsCoefficientsEnum); _assert_(weights_input);
+	Input2* thickness_input =element->GetInput2(ThicknessEnum);                          _assert_(thickness_input);
+	Input2* vx_input        =element->GetInput2(VxEnum);                                 _assert_(vx_input);
+	Input2* vy_input        =element->GetInput2(VyEnum);                                 _assert_(vy_input);
 
 	/* Start  looping on the number of gaussian points: */
Index: /issm/trunk/src/c/shared/Bamg/Abs.h
===================================================================
--- /issm/trunk/src/c/shared/Bamg/Abs.h	(revision 24685)
+++ /issm/trunk/src/c/shared/Bamg/Abs.h	(revision 24686)
@@ -3,7 +3,5 @@
 
 namespace bamg {
-
 	template<class T> inline T Abs (const T &a){return a <0 ? -a : a;}
-
 }
 #endif
Index: /issm/trunk/src/c/shared/Bamg/BigPrimeNumber.h
===================================================================
--- /issm/trunk/src/c/shared/Bamg/BigPrimeNumber.h	(revision 24685)
+++ /issm/trunk/src/c/shared/Bamg/BigPrimeNumber.h	(revision 24686)
@@ -3,7 +3,5 @@
 
 namespace bamg {
-
 	long BigPrimeNumber(long n);
-
 }
 #endif
Index: /issm/trunk/src/c/shared/Bamg/Exchange.h
===================================================================
--- /issm/trunk/src/c/shared/Bamg/Exchange.h	(revision 24685)
+++ /issm/trunk/src/c/shared/Bamg/Exchange.h	(revision 24686)
@@ -2,5 +2,5 @@
 #define _EXCHANGE_H_
 
-template<class T> inline void Exchange (T &a,T &b) {T c=a;a=b;b=c;}
+template<class T> inline void Exchange(T &a,T &b){T c=a;a=b;b=c;}
 
 #endif
Index: /issm/trunk/src/c/shared/Bamg/OppositeAngle.h
===================================================================
--- /issm/trunk/src/c/shared/Bamg/OppositeAngle.h	(revision 24685)
+++ /issm/trunk/src/c/shared/Bamg/OppositeAngle.h	(revision 24686)
@@ -7,5 +7,5 @@
 namespace bamg {
 	inline float  OppositeAngle(float  a){return a<0 ? PI+a:a-PI;}
-	inline double OppositeAngle(double a){return a<0 ?  PI+a:a- PI;}
+	inline double OppositeAngle(double a){return a<0 ? PI+a:a-PI;}
 }
 
Index: /issm/trunk/src/c/shared/Elements/PrintArrays.cpp
===================================================================
--- /issm/trunk/src/c/shared/Elements/PrintArrays.cpp	(revision 24685)
+++ /issm/trunk/src/c/shared/Elements/PrintArrays.cpp	(revision 24686)
@@ -1,4 +1,5 @@
 #include "./elements.h"
 #include "../io/Print/Print.h"
+#include <iomanip>
 using namespace std;
 
Index: /issm/trunk/src/c/shared/Enum/Enum.vim
===================================================================
--- /issm/trunk/src/c/shared/Enum/Enum.vim	(revision 24685)
+++ /issm/trunk/src/c/shared/Enum/Enum.vim	(revision 24686)
@@ -90,5 +90,4 @@
 syn keyword cConstant BasalforcingsPicoMaxboxcountEnum
 syn keyword cConstant BasalforcingsPicoNumBasinsEnum
-syn keyword cConstant BasalforcingsPicoOverturningCoeffEnum
 syn keyword cConstant BasalforcingsPlumeradiusEnum
 syn keyword cConstant BasalforcingsPlumexEnum
@@ -130,4 +129,8 @@
 syn keyword cConstant DomainDimensionEnum
 syn keyword cConstant DomainTypeEnum
+syn keyword cConstant DslModelEnum
+syn keyword cConstant DslModelidEnum
+syn keyword cConstant DslNummodelsEnum
+syn keyword cConstant DslComputeFingerprintsEnum
 syn keyword cConstant EarthIdEnum
 syn keyword cConstant EplZigZagCounterEnum
@@ -146,4 +149,5 @@
 syn keyword cConstant FrictionCouplingEnum
 syn keyword cConstant FrictionDeltaEnum
+syn keyword cConstant FrictionEffectivePressureLimitEnum
 syn keyword cConstant FrictionFEnum
 syn keyword cConstant FrictionGammaEnum
@@ -282,6 +286,4 @@
 syn keyword cConstant OutputFilePointerEnum
 syn keyword cConstant OutputdefinitionEnum
-syn keyword cConstant ParamEnum
-syn keyword cConstant ParametersEnum
 syn keyword cConstant QmuErrNameEnum
 syn keyword cConstant QmuInNameEnum
@@ -289,4 +291,6 @@
 syn keyword cConstant QmuNumberofpartitionsEnum
 syn keyword cConstant QmuOutNameEnum
+syn keyword cConstant QmuOutputEnum
+syn keyword cConstant QmuCurrEvalIdEnum
 syn keyword cConstant QmuEpartitionEnum
 syn keyword cConstant QmuVpartitionEnum
@@ -380,4 +384,6 @@
 syn keyword cConstant SmbTdiffEnum
 syn keyword cConstant SmbThermoDeltaTScalingEnum
+syn keyword cConstant SmbTemperaturesReconstructedYearsEnum
+syn keyword cConstant SmbPrecipitationsReconstructedYearsEnum
 syn keyword cConstant SmoothThicknessMultiplierEnum
 syn keyword cConstant SolutionTypeEnum
@@ -460,4 +466,5 @@
 syn keyword cConstant BasalforcingsGeothermalfluxEnum
 syn keyword cConstant BasalforcingsGroundediceMeltingRateEnum
+syn keyword cConstant BasalforcingsPerturbationMeltingRateEnum
 syn keyword cConstant BasalforcingsIsmip6BasinIdEnum
 syn keyword cConstant BasalforcingsIsmip6TfEnum
@@ -468,4 +475,5 @@
 syn keyword cConstant BasalforcingsPicoBasinIdEnum
 syn keyword cConstant BasalforcingsPicoBoxIdEnum
+syn keyword cConstant BasalforcingsPicoOverturningCoeffEnum
 syn keyword cConstant BasalforcingsPicoSubShelfOceanOverturningEnum
 syn keyword cConstant BasalforcingsPicoSubShelfOceanSalinityEnum
@@ -475,4 +483,5 @@
 syn keyword cConstant BasalStressEnum
 syn keyword cConstant BaseEnum
+syn keyword cConstant BaseOldEnum
 syn keyword cConstant BaseSlopeXEnum
 syn keyword cConstant BaseSlopeYEnum
@@ -495,5 +504,7 @@
 syn keyword cConstant CrevasseDepthEnum
 syn keyword cConstant DamageDEnum
+syn keyword cConstant DamageDOldEnum
 syn keyword cConstant DamageDbarEnum
+syn keyword cConstant DamageDbarOldEnum
 syn keyword cConstant DamageFEnum
 syn keyword cConstant DegreeOfChannelizationEnum
@@ -516,4 +527,9 @@
 syn keyword cConstant DrivingStressXEnum
 syn keyword cConstant DrivingStressYEnum
+syn keyword cConstant DslGlobalAverageThermostericSeaLevelChangeEnum
+syn keyword cConstant DslSeaSurfaceHeightChangeAboveGeoidEnum
+syn keyword cConstant DslStericRateEnum
+syn keyword cConstant DslDynamicRateEnum
+syn keyword cConstant DummyEnum
 syn keyword cConstant EffectivePressureEnum
 syn keyword cConstant EffectivePressureSubstepEnum
@@ -631,4 +647,5 @@
 syn keyword cConstant P0Enum
 syn keyword cConstant P1Enum
+syn keyword cConstant PartitioningEnum
 syn keyword cConstant PressureEnum
 syn keyword cConstant RadarEnum
@@ -660,7 +677,7 @@
 syn keyword cConstant SealevelUNorthEsaEnum
 syn keyword cConstant SealevelriseCumDeltathicknessEnum
+syn keyword cConstant SealevelriseCumDeltathicknessOldEnum
 syn keyword cConstant SealevelriseDeltathicknessEnum
 syn keyword cConstant SealevelriseSpcthicknessEnum
-syn keyword cConstant SealevelriseStericRateEnum
 syn keyword cConstant SealevelriseHydroRateEnum
 syn keyword cConstant SedimentHeadEnum
@@ -701,4 +718,5 @@
 syn keyword cConstant SmbEAirEnum
 syn keyword cConstant SmbECEnum
+syn keyword cConstant SmbECDtEnum
 syn keyword cConstant SmbECiniEnum
 syn keyword cConstant SmbElaEnum
@@ -758,4 +776,5 @@
 syn keyword cConstant SmbVzEnum
 syn keyword cConstant SmbWEnum
+syn keyword cConstant SmbWAddEnum
 syn keyword cConstant SmbWiniEnum
 syn keyword cConstant SmbZMaxEnum
@@ -785,4 +804,5 @@
 syn keyword cConstant SurfaceCrevasseEnum
 syn keyword cConstant SurfaceEnum
+syn keyword cConstant SurfaceOldEnum
 syn keyword cConstant SurfaceLogVelMisfitEnum
 syn keyword cConstant SurfaceLogVxVyMisfitEnum
@@ -953,4 +973,6 @@
 syn keyword cConstant BoolExternalResultEnum
 syn keyword cConstant BoolInputEnum
+syn keyword cConstant BoolInput2Enum
+syn keyword cConstant IntInput2Enum
 syn keyword cConstant BoolParamEnum
 syn keyword cConstant BoundaryEnum
@@ -973,4 +995,5 @@
 syn keyword cConstant ContoursEnum
 syn keyword cConstant ControlInputEnum
+syn keyword cConstant ControlInput2Enum
 syn keyword cConstant ControlInputGradEnum
 syn keyword cConstant ControlInputMaxsEnum
@@ -985,4 +1008,5 @@
 syn keyword cConstant DataSetParamEnum
 syn keyword cConstant DatasetInputEnum
+syn keyword cConstant DatasetInput2Enum
 syn keyword cConstant DefaultAnalysisEnum
 syn keyword cConstant DefaultCalvingEnum
@@ -994,4 +1018,5 @@
 syn keyword cConstant Domain3DsurfaceEnum
 syn keyword cConstant DoubleArrayInputEnum
+syn keyword cConstant ArrayInput2Enum
 syn keyword cConstant DoubleExternalResultEnum
 syn keyword cConstant DoubleInputEnum
@@ -1074,8 +1099,13 @@
 syn keyword cConstant IntExternalResultEnum
 syn keyword cConstant IntInputEnum
+syn keyword cConstant ElementInput2Enum
+syn keyword cConstant SegInput2Enum
+syn keyword cConstant TriaInput2Enum
+syn keyword cConstant PentaInput2Enum
 syn keyword cConstant IntMatExternalResultEnum
 syn keyword cConstant IntMatParamEnum
 syn keyword cConstant IntParamEnum
 syn keyword cConstant IntVecParamEnum
+syn keyword cConstant Inputs2Enum
 syn keyword cConstant InternalEnum
 syn keyword cConstant IntersectEnum
@@ -1157,4 +1187,6 @@
 syn keyword cConstant OpenEnum
 syn keyword cConstant OptionEnum
+syn keyword cConstant ParamEnum
+syn keyword cConstant ParametersEnum
 syn keyword cConstant P0ArrayEnum
 syn keyword cConstant P0DGEnum
@@ -1259,4 +1291,5 @@
 syn keyword cConstant TransientArrayParamEnum
 syn keyword cConstant TransientInputEnum
+syn keyword cConstant TransientInput2Enum
 syn keyword cConstant TransientParamEnum
 syn keyword cConstant TransientSolutionEnum
@@ -1298,5 +1331,6 @@
 syn keyword cType AdaptiveMeshRefinement
 syn keyword cType AmrBamg
-syn keyword cType BoolInput
+syn keyword cType ArrayInput2
+syn keyword cType BoolInput2
 syn keyword cType BoolParam
 syn keyword cType Cfdragcoeffabsgrad
@@ -1308,12 +1342,10 @@
 syn keyword cType Contour
 syn keyword cType Contours
-syn keyword cType ControlInput
+syn keyword cType ControlInput2
 syn keyword cType Covertree
 syn keyword cType DataSetParam
-syn keyword cType DatasetInput
+syn keyword cType DatasetInput2
 syn keyword cType Definition
 syn keyword cType DependentObject
-syn keyword cType DoubleArrayInput
-syn keyword cType DoubleInput
 syn keyword cType DoubleMatArrayParam
 syn keyword cType DoubleMatParam
@@ -1323,4 +1355,5 @@
 syn keyword cType Element
 syn keyword cType ElementHook
+syn keyword cType ElementInput2
 syn keyword cType ElementMatrix
 syn keyword cType ElementVector
@@ -1342,7 +1375,7 @@
 syn keyword cType GiaDeflectionCoreArgs
 syn keyword cType Hook
-syn keyword cType Input
-syn keyword cType Inputs
-syn keyword cType IntInput
+syn keyword cType Input2
+syn keyword cType Inputs2
+syn keyword cType IntInput2
 syn keyword cType IntMatParam
 syn keyword cType IntParam
@@ -1380,5 +1413,5 @@
 syn keyword cType Penpair
 syn keyword cType Penta
-syn keyword cType PentaInput
+syn keyword cType PentaInput2
 syn keyword cType PentaRef
 syn keyword cType PowerVariogram
@@ -1391,5 +1424,5 @@
 syn keyword cType Riftfront
 syn keyword cType Seg
-syn keyword cType SegInput
+syn keyword cType SegInput2
 syn keyword cType SegRef
 syn keyword cType Segment
@@ -1401,11 +1434,10 @@
 syn keyword cType StringParam
 syn keyword cType Tetra
-syn keyword cType TetraInput
 syn keyword cType TetraRef
 syn keyword cType TransientArrayParam
-syn keyword cType TransientInput
+syn keyword cType TransientInput2
 syn keyword cType TransientParam
 syn keyword cType Tria
-syn keyword cType TriaInput
+syn keyword cType TriaInput2
 syn keyword cType TriaRef
 syn keyword cType Variogram
Index: /issm/trunk/src/c/shared/Enum/EnumDefinitions.h
===================================================================
--- /issm/trunk/src/c/shared/Enum/EnumDefinitions.h	(revision 24685)
+++ /issm/trunk/src/c/shared/Enum/EnumDefinitions.h	(revision 24686)
@@ -84,5 +84,4 @@
 	BasalforcingsPicoMaxboxcountEnum,
 	BasalforcingsPicoNumBasinsEnum,
-	BasalforcingsPicoOverturningCoeffEnum,
 	BasalforcingsPlumeradiusEnum,
 	BasalforcingsPlumexEnum,
@@ -124,4 +123,8 @@
 	DomainDimensionEnum,
 	DomainTypeEnum,
+	DslModelEnum,
+	DslModelidEnum,
+	DslNummodelsEnum,
+	DslComputeFingerprintsEnum,
 	EarthIdEnum,
 	EplZigZagCounterEnum,
@@ -140,4 +143,5 @@
 	FrictionCouplingEnum,
 	FrictionDeltaEnum,
+	FrictionEffectivePressureLimitEnum,
 	FrictionFEnum,
 	FrictionGammaEnum,
@@ -276,6 +280,4 @@
 	OutputFilePointerEnum,
 	OutputdefinitionEnum,
-	ParamEnum,
-	ParametersEnum,
 	QmuErrNameEnum,
 	QmuInNameEnum,
@@ -283,4 +285,6 @@
 	QmuNumberofpartitionsEnum,
 	QmuOutNameEnum,
+	QmuOutputEnum,
+	QmuCurrEvalIdEnum,
 	QmuEpartitionEnum,
 	QmuVpartitionEnum,
@@ -374,4 +378,6 @@
 	SmbTdiffEnum,
 	SmbThermoDeltaTScalingEnum,
+	SmbTemperaturesReconstructedYearsEnum,
+	SmbPrecipitationsReconstructedYearsEnum,
 	SmoothThicknessMultiplierEnum,
 	SolutionTypeEnum,
@@ -456,4 +462,5 @@
 	BasalforcingsGeothermalfluxEnum,
 	BasalforcingsGroundediceMeltingRateEnum,
+	BasalforcingsPerturbationMeltingRateEnum,
 	BasalforcingsIsmip6BasinIdEnum,
 	BasalforcingsIsmip6TfEnum,
@@ -464,4 +471,5 @@
 	BasalforcingsPicoBasinIdEnum,
 	BasalforcingsPicoBoxIdEnum,
+	BasalforcingsPicoOverturningCoeffEnum,
 	BasalforcingsPicoSubShelfOceanOverturningEnum,
 	BasalforcingsPicoSubShelfOceanSalinityEnum,
@@ -471,4 +479,5 @@
 	BasalStressEnum,
 	BaseEnum,
+	BaseOldEnum,
 	BaseSlopeXEnum,
 	BaseSlopeYEnum,
@@ -491,5 +500,7 @@
 	CrevasseDepthEnum,
 	DamageDEnum,
+	DamageDOldEnum,
 	DamageDbarEnum,
+	DamageDbarOldEnum,
 	DamageFEnum,
 	DegreeOfChannelizationEnum,
@@ -512,4 +523,10 @@
 	DrivingStressXEnum,
 	DrivingStressYEnum,
+	DslGlobalAverageThermostericSeaLevelChangeEnum,
+	DslSeaSurfaceHeightChangeAboveGeoidEnum,
+	DslSeaWaterPressureChangeAtSeaFloorEnum,
+	DslStericRateEnum,
+	DslDynamicRateEnum,
+	DummyEnum,
    EffectivePressureEnum,
 	EffectivePressureSubstepEnum,
@@ -627,4 +644,5 @@
 	P0Enum,
 	P1Enum,
+	PartitioningEnum,
 	PressureEnum,
 	RadarEnum,
@@ -656,10 +674,10 @@
 	SealevelUNorthEsaEnum,
 	SealevelriseCumDeltathicknessEnum,
+	SealevelriseCumDeltathicknessOldEnum,
 	SealevelriseDeltathicknessEnum,
 	SealevelriseSpcthicknessEnum,
-	SealevelriseStericRateEnum,
 	SealevelriseHydroRateEnum,
-   SedimentHeadEnum,
-   SedimentHeadOldEnum,
+	SedimentHeadEnum,
+	SedimentHeadOldEnum,
 	SedimentHeadSubstepEnum,
 	SedimentHeadTransientEnum,
@@ -697,4 +715,5 @@
 	SmbEAirEnum,
 	SmbECEnum,
+	SmbECDtEnum,
 	SmbECiniEnum,
 	SmbElaEnum,
@@ -716,4 +735,5 @@
 	SmbMeanULWEnum,
 	SmbMeltEnum,
+	SmbMInitEnum,
 	SmbMonthlytemperaturesEnum,
 	SmbNetLWEnum,
@@ -754,4 +774,5 @@
 	SmbVzEnum,
 	SmbWEnum,
+	SmbWAddEnum,
 	SmbWiniEnum,
 	SmbZMaxEnum,
@@ -781,4 +802,5 @@
 	SurfaceCrevasseEnum,
 	SurfaceEnum,
+	SurfaceOldEnum,
 	SurfaceLogVelMisfitEnum,
 	SurfaceLogVxVyMisfitEnum,
@@ -951,4 +973,6 @@
 	BoolExternalResultEnum,
 	BoolInputEnum,
+	BoolInput2Enum,
+	IntInput2Enum,
 	BoolParamEnum,
 	BoundaryEnum,
@@ -971,4 +995,5 @@
 	ContoursEnum,
 	ControlInputEnum,
+	ControlInput2Enum,
 	ControlInputGradEnum,
 	ControlInputMaxsEnum,
@@ -983,4 +1008,5 @@
 	DataSetParamEnum,
 	DatasetInputEnum,
+	DatasetInput2Enum,
 	DefaultAnalysisEnum,
 	DefaultCalvingEnum,
@@ -992,4 +1018,5 @@
 	Domain3DsurfaceEnum,
 	DoubleArrayInputEnum,
+	ArrayInput2Enum,
 	DoubleExternalResultEnum,
 	DoubleInputEnum,
@@ -1072,8 +1099,13 @@
 	IntExternalResultEnum,
 	IntInputEnum,
+	ElementInput2Enum,
+	SegInput2Enum,
+	TriaInput2Enum,
+	PentaInput2Enum,
 	IntMatExternalResultEnum,
 	IntMatParamEnum,
 	IntParamEnum,
 	IntVecParamEnum,
+	Inputs2Enum,
 	InternalEnum,
 	IntersectEnum,
@@ -1155,4 +1187,6 @@
 	OpenEnum,
 	OptionEnum,
+	ParamEnum,
+	ParametersEnum,
 	P0ArrayEnum,
 	P0DGEnum,
@@ -1257,4 +1291,5 @@
 	TransientArrayParamEnum,
 	TransientInputEnum,
+	TransientInput2Enum,
 	TransientParamEnum,
 	TransientSolutionEnum,
Index: /issm/trunk/src/c/shared/Enum/EnumToStringx.cpp
===================================================================
--- /issm/trunk/src/c/shared/Enum/EnumToStringx.cpp	(revision 24685)
+++ /issm/trunk/src/c/shared/Enum/EnumToStringx.cpp	(revision 24686)
@@ -92,5 +92,4 @@
 		case BasalforcingsPicoMaxboxcountEnum : return "BasalforcingsPicoMaxboxcount";
 		case BasalforcingsPicoNumBasinsEnum : return "BasalforcingsPicoNumBasins";
-		case BasalforcingsPicoOverturningCoeffEnum : return "BasalforcingsPicoOverturningCoeff";
 		case BasalforcingsPlumeradiusEnum : return "BasalforcingsPlumeradius";
 		case BasalforcingsPlumexEnum : return "BasalforcingsPlumex";
@@ -132,4 +131,8 @@
 		case DomainDimensionEnum : return "DomainDimension";
 		case DomainTypeEnum : return "DomainType";
+		case DslModelEnum : return "DslModel";
+		case DslModelidEnum : return "DslModelid";
+		case DslNummodelsEnum : return "DslNummodels";
+		case DslComputeFingerprintsEnum : return "DslComputeFingerprints";
 		case EarthIdEnum : return "EarthId";
 		case EplZigZagCounterEnum : return "EplZigZagCounter";
@@ -148,4 +151,5 @@
 		case FrictionCouplingEnum : return "FrictionCoupling";
 		case FrictionDeltaEnum : return "FrictionDelta";
+		case FrictionEffectivePressureLimitEnum : return "FrictionEffectivePressureLimit";
 		case FrictionFEnum : return "FrictionF";
 		case FrictionGammaEnum : return "FrictionGamma";
@@ -284,6 +288,4 @@
 		case OutputFilePointerEnum : return "OutputFilePointer";
 		case OutputdefinitionEnum : return "Outputdefinition";
-		case ParamEnum : return "Param";
-		case ParametersEnum : return "Parameters";
 		case QmuErrNameEnum : return "QmuErrName";
 		case QmuInNameEnum : return "QmuInName";
@@ -291,4 +293,6 @@
 		case QmuNumberofpartitionsEnum : return "QmuNumberofpartitions";
 		case QmuOutNameEnum : return "QmuOutName";
+		case QmuOutputEnum : return "QmuOutput";
+		case QmuCurrEvalIdEnum : return "QmuCurrEvalId";
 		case QmuEpartitionEnum : return "QmuEpartition";
 		case QmuVpartitionEnum : return "QmuVpartition";
@@ -382,4 +386,6 @@
 		case SmbTdiffEnum : return "SmbTdiff";
 		case SmbThermoDeltaTScalingEnum : return "SmbThermoDeltaTScaling";
+		case SmbTemperaturesReconstructedYearsEnum : return "SmbTemperaturesReconstructedYears";
+		case SmbPrecipitationsReconstructedYearsEnum : return "SmbPrecipitationsReconstructedYears";
 		case SmoothThicknessMultiplierEnum : return "SmoothThicknessMultiplier";
 		case SolutionTypeEnum : return "SolutionType";
@@ -462,4 +468,5 @@
 		case BasalforcingsGeothermalfluxEnum : return "BasalforcingsGeothermalflux";
 		case BasalforcingsGroundediceMeltingRateEnum : return "BasalforcingsGroundediceMeltingRate";
+		case BasalforcingsPerturbationMeltingRateEnum : return "BasalforcingsPerturbationMeltingRate";
 		case BasalforcingsIsmip6BasinIdEnum : return "BasalforcingsIsmip6BasinId";
 		case BasalforcingsIsmip6TfEnum : return "BasalforcingsIsmip6Tf";
@@ -470,4 +477,5 @@
 		case BasalforcingsPicoBasinIdEnum : return "BasalforcingsPicoBasinId";
 		case BasalforcingsPicoBoxIdEnum : return "BasalforcingsPicoBoxId";
+		case BasalforcingsPicoOverturningCoeffEnum : return "BasalforcingsPicoOverturningCoeff";
 		case BasalforcingsPicoSubShelfOceanOverturningEnum : return "BasalforcingsPicoSubShelfOceanOverturning";
 		case BasalforcingsPicoSubShelfOceanSalinityEnum : return "BasalforcingsPicoSubShelfOceanSalinity";
@@ -477,4 +485,5 @@
 		case BasalStressEnum : return "BasalStress";
 		case BaseEnum : return "Base";
+		case BaseOldEnum : return "BaseOld";
 		case BaseSlopeXEnum : return "BaseSlopeX";
 		case BaseSlopeYEnum : return "BaseSlopeY";
@@ -497,5 +506,7 @@
 		case CrevasseDepthEnum : return "CrevasseDepth";
 		case DamageDEnum : return "DamageD";
+		case DamageDOldEnum : return "DamageDOld";
 		case DamageDbarEnum : return "DamageDbar";
+		case DamageDbarOldEnum : return "DamageDbarOld";
 		case DamageFEnum : return "DamageF";
 		case DegreeOfChannelizationEnum : return "DegreeOfChannelization";
@@ -518,4 +529,9 @@
 		case DrivingStressXEnum : return "DrivingStressX";
 		case DrivingStressYEnum : return "DrivingStressY";
+		case DslGlobalAverageThermostericSeaLevelChangeEnum : return "DslGlobalAverageThermostericSeaLevelChange";
+		case DslSeaSurfaceHeightChangeAboveGeoidEnum : return "DslSeaSurfaceHeightChangeAboveGeoid";
+		case DslStericRateEnum : return "DslStericRate";
+		case DslDynamicRateEnum : return "DslDynamicRate";
+		case DummyEnum : return "Dummy";
 		case EffectivePressureEnum : return "EffectivePressure";
 		case EffectivePressureSubstepEnum : return "EffectivePressureSubstep";
@@ -633,4 +649,5 @@
 		case P0Enum : return "P0";
 		case P1Enum : return "P1";
+		case PartitioningEnum : return "Partitioning";
 		case PressureEnum : return "Pressure";
 		case RadarEnum : return "Radar";
@@ -662,7 +679,7 @@
 		case SealevelUNorthEsaEnum : return "SealevelUNorthEsa";
 		case SealevelriseCumDeltathicknessEnum : return "SealevelriseCumDeltathickness";
+		case SealevelriseCumDeltathicknessOldEnum : return "SealevelriseCumDeltathicknessOld";
 		case SealevelriseDeltathicknessEnum : return "SealevelriseDeltathickness";
 		case SealevelriseSpcthicknessEnum : return "SealevelriseSpcthickness";
-		case SealevelriseStericRateEnum : return "SealevelriseStericRate";
 		case SealevelriseHydroRateEnum : return "SealevelriseHydroRate";
 		case SedimentHeadEnum : return "SedimentHead";
@@ -703,4 +720,5 @@
 		case SmbEAirEnum : return "SmbEAir";
 		case SmbECEnum : return "SmbEC";
+		case SmbECDtEnum : return "SmbECDt";
 		case SmbECiniEnum : return "SmbECini";
 		case SmbElaEnum : return "SmbEla";
@@ -760,4 +778,5 @@
 		case SmbVzEnum : return "SmbVz";
 		case SmbWEnum : return "SmbW";
+		case SmbWAddEnum : return "SmbWAdd";
 		case SmbWiniEnum : return "SmbWini";
 		case SmbZMaxEnum : return "SmbZMax";
@@ -787,4 +806,5 @@
 		case SurfaceCrevasseEnum : return "SurfaceCrevasse";
 		case SurfaceEnum : return "Surface";
+		case SurfaceOldEnum : return "SurfaceOld";
 		case SurfaceLogVelMisfitEnum : return "SurfaceLogVelMisfit";
 		case SurfaceLogVxVyMisfitEnum : return "SurfaceLogVxVyMisfit";
@@ -955,4 +975,6 @@
 		case BoolExternalResultEnum : return "BoolExternalResult";
 		case BoolInputEnum : return "BoolInput";
+		case BoolInput2Enum : return "BoolInput2";
+		case IntInput2Enum : return "IntInput2";
 		case BoolParamEnum : return "BoolParam";
 		case BoundaryEnum : return "Boundary";
@@ -975,4 +997,5 @@
 		case ContoursEnum : return "Contours";
 		case ControlInputEnum : return "ControlInput";
+		case ControlInput2Enum : return "ControlInput2";
 		case ControlInputGradEnum : return "ControlInputGrad";
 		case ControlInputMaxsEnum : return "ControlInputMaxs";
@@ -987,4 +1010,5 @@
 		case DataSetParamEnum : return "DataSetParam";
 		case DatasetInputEnum : return "DatasetInput";
+		case DatasetInput2Enum : return "DatasetInput2";
 		case DefaultAnalysisEnum : return "DefaultAnalysis";
 		case DefaultCalvingEnum : return "DefaultCalving";
@@ -996,4 +1020,5 @@
 		case Domain3DsurfaceEnum : return "Domain3Dsurface";
 		case DoubleArrayInputEnum : return "DoubleArrayInput";
+		case ArrayInput2Enum : return "ArrayInput2";
 		case DoubleExternalResultEnum : return "DoubleExternalResult";
 		case DoubleInputEnum : return "DoubleInput";
@@ -1076,8 +1101,13 @@
 		case IntExternalResultEnum : return "IntExternalResult";
 		case IntInputEnum : return "IntInput";
+		case ElementInput2Enum : return "ElementInput2";
+		case SegInput2Enum : return "SegInput2";
+		case TriaInput2Enum : return "TriaInput2";
+		case PentaInput2Enum : return "PentaInput2";
 		case IntMatExternalResultEnum : return "IntMatExternalResult";
 		case IntMatParamEnum : return "IntMatParam";
 		case IntParamEnum : return "IntParam";
 		case IntVecParamEnum : return "IntVecParam";
+		case Inputs2Enum : return "Inputs2";
 		case InternalEnum : return "Internal";
 		case IntersectEnum : return "Intersect";
@@ -1159,4 +1189,6 @@
 		case OpenEnum : return "Open";
 		case OptionEnum : return "Option";
+		case ParamEnum : return "Param";
+		case ParametersEnum : return "Parameters";
 		case P0ArrayEnum : return "P0Array";
 		case P0DGEnum : return "P0DG";
@@ -1261,4 +1293,5 @@
 		case TransientArrayParamEnum : return "TransientArrayParam";
 		case TransientInputEnum : return "TransientInput";
+		case TransientInput2Enum : return "TransientInput2";
 		case TransientParamEnum : return "TransientParam";
 		case TransientSolutionEnum : return "TransientSolution";
Index: /issm/trunk/src/c/shared/Enum/StringToEnumx.cpp
===================================================================
--- /issm/trunk/src/c/shared/Enum/StringToEnumx.cpp	(revision 24685)
+++ /issm/trunk/src/c/shared/Enum/StringToEnumx.cpp	(revision 24686)
@@ -92,5 +92,4 @@
 	      else if (strcmp(name,"BasalforcingsPicoMaxboxcount")==0) return BasalforcingsPicoMaxboxcountEnum;
 	      else if (strcmp(name,"BasalforcingsPicoNumBasins")==0) return BasalforcingsPicoNumBasinsEnum;
-	      else if (strcmp(name,"BasalforcingsPicoOverturningCoeff")==0) return BasalforcingsPicoOverturningCoeffEnum;
 	      else if (strcmp(name,"BasalforcingsPlumeradius")==0) return BasalforcingsPlumeradiusEnum;
 	      else if (strcmp(name,"BasalforcingsPlumex")==0) return BasalforcingsPlumexEnum;
@@ -132,13 +131,17 @@
 	      else if (strcmp(name,"DomainDimension")==0) return DomainDimensionEnum;
 	      else if (strcmp(name,"DomainType")==0) return DomainTypeEnum;
+	      else if (strcmp(name,"DslModel")==0) return DslModelEnum;
+	      else if (strcmp(name,"DslModelid")==0) return DslModelidEnum;
+	      else if (strcmp(name,"DslNummodels")==0) return DslNummodelsEnum;
+	      else if (strcmp(name,"DslComputeFingerprints")==0) return DslComputeFingerprintsEnum;
 	      else if (strcmp(name,"EarthId")==0) return EarthIdEnum;
 	      else if (strcmp(name,"EplZigZagCounter")==0) return EplZigZagCounterEnum;
-	      else if (strcmp(name,"EsaHElastic")==0) return EsaHElasticEnum;
-	      else if (strcmp(name,"EsaHemisphere")==0) return EsaHemisphereEnum;
-	      else if (strcmp(name,"EsaRequestedOutputs")==0) return EsaRequestedOutputsEnum;
          else stage=2;
    }
    if(stage==2){
-	      if (strcmp(name,"EsaUElastic")==0) return EsaUElasticEnum;
+	      if (strcmp(name,"EsaHElastic")==0) return EsaHElasticEnum;
+	      else if (strcmp(name,"EsaHemisphere")==0) return EsaHemisphereEnum;
+	      else if (strcmp(name,"EsaRequestedOutputs")==0) return EsaRequestedOutputsEnum;
+	      else if (strcmp(name,"EsaUElastic")==0) return EsaUElasticEnum;
 	      else if (strcmp(name,"ExtrapolationVariable")==0) return ExtrapolationVariableEnum;
 	      else if (strcmp(name,"FemModelComm")==0) return FemModelCommEnum;
@@ -151,4 +154,5 @@
 	      else if (strcmp(name,"FrictionCoupling")==0) return FrictionCouplingEnum;
 	      else if (strcmp(name,"FrictionDelta")==0) return FrictionDeltaEnum;
+	      else if (strcmp(name,"FrictionEffectivePressureLimit")==0) return FrictionEffectivePressureLimitEnum;
 	      else if (strcmp(name,"FrictionF")==0) return FrictionFEnum;
 	      else if (strcmp(name,"FrictionGamma")==0) return FrictionGammaEnum;
@@ -256,12 +260,12 @@
 	      else if (strcmp(name,"MaterialsHeatcapacity")==0) return MaterialsHeatcapacityEnum;
 	      else if (strcmp(name,"MaterialsLatentheat")==0) return MaterialsLatentheatEnum;
-	      else if (strcmp(name,"MaterialsLithosphereDensity")==0) return MaterialsLithosphereDensityEnum;
+         else stage=3;
+   }
+   if(stage==3){
+	      if (strcmp(name,"MaterialsLithosphereDensity")==0) return MaterialsLithosphereDensityEnum;
 	      else if (strcmp(name,"MaterialsLithosphereShearModulus")==0) return MaterialsLithosphereShearModulusEnum;
 	      else if (strcmp(name,"MaterialsMantleDensity")==0) return MaterialsMantleDensityEnum;
 	      else if (strcmp(name,"MaterialsMantleShearModulus")==0) return MaterialsMantleShearModulusEnum;
-         else stage=3;
-   }
-   if(stage==3){
-	      if (strcmp(name,"MaterialsMeltingpoint")==0) return MaterialsMeltingpointEnum;
+	      else if (strcmp(name,"MaterialsMeltingpoint")==0) return MaterialsMeltingpointEnum;
 	      else if (strcmp(name,"MaterialsMixedLayerCapacity")==0) return MaterialsMixedLayerCapacityEnum;
 	      else if (strcmp(name,"MaterialsMuWater")==0) return MaterialsMuWaterEnum;
@@ -290,6 +294,4 @@
 	      else if (strcmp(name,"OutputFilePointer")==0) return OutputFilePointerEnum;
 	      else if (strcmp(name,"Outputdefinition")==0) return OutputdefinitionEnum;
-	      else if (strcmp(name,"Param")==0) return ParamEnum;
-	      else if (strcmp(name,"Parameters")==0) return ParametersEnum;
 	      else if (strcmp(name,"QmuErrName")==0) return QmuErrNameEnum;
 	      else if (strcmp(name,"QmuInName")==0) return QmuInNameEnum;
@@ -297,4 +299,6 @@
 	      else if (strcmp(name,"QmuNumberofpartitions")==0) return QmuNumberofpartitionsEnum;
 	      else if (strcmp(name,"QmuOutName")==0) return QmuOutNameEnum;
+	      else if (strcmp(name,"QmuOutput")==0) return QmuOutputEnum;
+	      else if (strcmp(name,"QmuCurrEvalId")==0) return QmuCurrEvalIdEnum;
 	      else if (strcmp(name,"QmuEpartition")==0) return QmuEpartitionEnum;
 	      else if (strcmp(name,"QmuVpartition")==0) return QmuVpartitionEnum;
@@ -379,16 +383,18 @@
 	      else if (strcmp(name,"SmbRlapslgm")==0) return SmbRlapslgmEnum;
 	      else if (strcmp(name,"SmbRunoffalti")==0) return SmbRunoffaltiEnum;
-	      else if (strcmp(name,"SmbRunoffgrad")==0) return SmbRunoffgradEnum;
+         else stage=4;
+   }
+   if(stage==4){
+	      if (strcmp(name,"SmbRunoffgrad")==0) return SmbRunoffgradEnum;
 	      else if (strcmp(name,"SmbRunoffref")==0) return SmbRunoffrefEnum;
 	      else if (strcmp(name,"SmbSealev")==0) return SmbSealevEnum;
 	      else if (strcmp(name,"SmbStepsPerStep")==0) return SmbStepsPerStepEnum;
-         else stage=4;
-   }
-   if(stage==4){
-	      if (strcmp(name,"SmbSwIdx")==0) return SmbSwIdxEnum;
+	      else if (strcmp(name,"SmbSwIdx")==0) return SmbSwIdxEnum;
 	      else if (strcmp(name,"SmbT0dry")==0) return SmbT0dryEnum;
 	      else if (strcmp(name,"SmbT0wet")==0) return SmbT0wetEnum;
 	      else if (strcmp(name,"SmbTdiff")==0) return SmbTdiffEnum;
 	      else if (strcmp(name,"SmbThermoDeltaTScaling")==0) return SmbThermoDeltaTScalingEnum;
+	      else if (strcmp(name,"SmbTemperaturesReconstructedYears")==0) return SmbTemperaturesReconstructedYearsEnum;
+	      else if (strcmp(name,"SmbPrecipitationsReconstructedYears")==0) return SmbPrecipitationsReconstructedYearsEnum;
 	      else if (strcmp(name,"SmoothThicknessMultiplier")==0) return SmoothThicknessMultiplierEnum;
 	      else if (strcmp(name,"SolutionType")==0) return SolutionTypeEnum;
@@ -471,4 +477,5 @@
 	      else if (strcmp(name,"BasalforcingsGeothermalflux")==0) return BasalforcingsGeothermalfluxEnum;
 	      else if (strcmp(name,"BasalforcingsGroundediceMeltingRate")==0) return BasalforcingsGroundediceMeltingRateEnum;
+	      else if (strcmp(name,"BasalforcingsPerturbationMeltingRate")==0) return BasalforcingsPerturbationMeltingRateEnum;
 	      else if (strcmp(name,"BasalforcingsIsmip6BasinId")==0) return BasalforcingsIsmip6BasinIdEnum;
 	      else if (strcmp(name,"BasalforcingsIsmip6Tf")==0) return BasalforcingsIsmip6TfEnum;
@@ -479,4 +486,5 @@
 	      else if (strcmp(name,"BasalforcingsPicoBasinId")==0) return BasalforcingsPicoBasinIdEnum;
 	      else if (strcmp(name,"BasalforcingsPicoBoxId")==0) return BasalforcingsPicoBoxIdEnum;
+	      else if (strcmp(name,"BasalforcingsPicoOverturningCoeff")==0) return BasalforcingsPicoOverturningCoeffEnum;
 	      else if (strcmp(name,"BasalforcingsPicoSubShelfOceanOverturning")==0) return BasalforcingsPicoSubShelfOceanOverturningEnum;
 	      else if (strcmp(name,"BasalforcingsPicoSubShelfOceanSalinity")==0) return BasalforcingsPicoSubShelfOceanSalinityEnum;
@@ -486,4 +494,5 @@
 	      else if (strcmp(name,"BasalStress")==0) return BasalStressEnum;
 	      else if (strcmp(name,"Base")==0) return BaseEnum;
+	      else if (strcmp(name,"BaseOld")==0) return BaseOldEnum;
 	      else if (strcmp(name,"BaseSlopeX")==0) return BaseSlopeXEnum;
 	      else if (strcmp(name,"BaseSlopeY")==0) return BaseSlopeYEnum;
@@ -497,5 +506,8 @@
 	      else if (strcmp(name,"CalvingStressThresholdGroundedice")==0) return CalvingStressThresholdGroundediceEnum;
 	      else if (strcmp(name,"CalvinglevermannCoeff")==0) return CalvinglevermannCoeffEnum;
-	      else if (strcmp(name,"CalvingratexAverage")==0) return CalvingratexAverageEnum;
+         else stage=5;
+   }
+   if(stage==5){
+	      if (strcmp(name,"CalvingratexAverage")==0) return CalvingratexAverageEnum;
 	      else if (strcmp(name,"Calvingratex")==0) return CalvingratexEnum;
 	      else if (strcmp(name,"CalvingrateyAverage")==0) return CalvingrateyAverageEnum;
@@ -506,8 +518,7 @@
 	      else if (strcmp(name,"CrevasseDepth")==0) return CrevasseDepthEnum;
 	      else if (strcmp(name,"DamageD")==0) return DamageDEnum;
-         else stage=5;
-   }
-   if(stage==5){
-	      if (strcmp(name,"DamageDbar")==0) return DamageDbarEnum;
+	      else if (strcmp(name,"DamageDOld")==0) return DamageDOldEnum;
+	      else if (strcmp(name,"DamageDbar")==0) return DamageDbarEnum;
+	      else if (strcmp(name,"DamageDbarOld")==0) return DamageDbarOldEnum;
 	      else if (strcmp(name,"DamageF")==0) return DamageFEnum;
 	      else if (strcmp(name,"DegreeOfChannelization")==0) return DegreeOfChannelizationEnum;
@@ -530,4 +541,9 @@
 	      else if (strcmp(name,"DrivingStressX")==0) return DrivingStressXEnum;
 	      else if (strcmp(name,"DrivingStressY")==0) return DrivingStressYEnum;
+	      else if (strcmp(name,"DslGlobalAverageThermostericSeaLevelChange")==0) return DslGlobalAverageThermostericSeaLevelChangeEnum;
+	      else if (strcmp(name,"DslSeaSurfaceHeightChangeAboveGeoid")==0) return DslSeaSurfaceHeightChangeAboveGeoidEnum;
+	      else if (strcmp(name,"DslStericRate")==0) return DslStericRateEnum;
+	      else if (strcmp(name,"DslDynamicRate")==0) return DslDynamicRateEnum;
+	      else if (strcmp(name,"Dummy")==0) return DummyEnum;
 	      else if (strcmp(name,"EffectivePressure")==0) return EffectivePressureEnum;
 	      else if (strcmp(name,"EffectivePressureSubstep")==0) return EffectivePressureSubstepEnum;
@@ -613,5 +629,8 @@
 	      else if (strcmp(name,"InversionThicknessObs")==0) return InversionThicknessObsEnum;
 	      else if (strcmp(name,"InversionVelObs")==0) return InversionVelObsEnum;
-	      else if (strcmp(name,"InversionVxObs")==0) return InversionVxObsEnum;
+         else stage=6;
+   }
+   if(stage==6){
+	      if (strcmp(name,"InversionVxObs")==0) return InversionVxObsEnum;
 	      else if (strcmp(name,"InversionVyObs")==0) return InversionVyObsEnum;
 	      else if (strcmp(name,"LevelsetfunctionSlopeX")==0) return LevelsetfunctionSlopeXEnum;
@@ -629,8 +648,5 @@
 	      else if (strcmp(name,"MaterialsRheologyE")==0) return MaterialsRheologyEEnum;
 	      else if (strcmp(name,"MaterialsRheologyEbar")==0) return MaterialsRheologyEbarEnum;
-         else stage=6;
-   }
-   if(stage==6){
-	      if (strcmp(name,"MaterialsRheologyEc")==0) return MaterialsRheologyEcEnum;
+	      else if (strcmp(name,"MaterialsRheologyEc")==0) return MaterialsRheologyEcEnum;
 	      else if (strcmp(name,"MaterialsRheologyEcbar")==0) return MaterialsRheologyEcbarEnum;
 	      else if (strcmp(name,"MaterialsRheologyEs")==0) return MaterialsRheologyEsEnum;
@@ -648,4 +664,5 @@
 	      else if (strcmp(name,"P0")==0) return P0Enum;
 	      else if (strcmp(name,"P1")==0) return P1Enum;
+	      else if (strcmp(name,"Partitioning")==0) return PartitioningEnum;
 	      else if (strcmp(name,"Pressure")==0) return PressureEnum;
 	      else if (strcmp(name,"Radar")==0) return RadarEnum;
@@ -677,7 +694,7 @@
 	      else if (strcmp(name,"SealevelUNorthEsa")==0) return SealevelUNorthEsaEnum;
 	      else if (strcmp(name,"SealevelriseCumDeltathickness")==0) return SealevelriseCumDeltathicknessEnum;
+	      else if (strcmp(name,"SealevelriseCumDeltathicknessOld")==0) return SealevelriseCumDeltathicknessOldEnum;
 	      else if (strcmp(name,"SealevelriseDeltathickness")==0) return SealevelriseDeltathicknessEnum;
 	      else if (strcmp(name,"SealevelriseSpcthickness")==0) return SealevelriseSpcthicknessEnum;
-	      else if (strcmp(name,"SealevelriseStericRate")==0) return SealevelriseStericRateEnum;
 	      else if (strcmp(name,"SealevelriseHydroRate")==0) return SealevelriseHydroRateEnum;
 	      else if (strcmp(name,"SedimentHead")==0) return SedimentHeadEnum;
@@ -718,4 +735,5 @@
 	      else if (strcmp(name,"SmbEAir")==0) return SmbEAirEnum;
 	      else if (strcmp(name,"SmbEC")==0) return SmbECEnum;
+	      else if (strcmp(name,"SmbECDt")==0) return SmbECDtEnum;
 	      else if (strcmp(name,"SmbECini")==0) return SmbECiniEnum;
 	      else if (strcmp(name,"SmbEla")==0) return SmbElaEnum;
@@ -734,5 +752,8 @@
 	      else if (strcmp(name,"SmbMassBalanceTransient")==0) return SmbMassBalanceTransientEnum;
 	      else if (strcmp(name,"SmbMeanLHF")==0) return SmbMeanLHFEnum;
-	      else if (strcmp(name,"SmbMeanSHF")==0) return SmbMeanSHFEnum;
+         else stage=7;
+   }
+   if(stage==7){
+	      if (strcmp(name,"SmbMeanSHF")==0) return SmbMeanSHFEnum;
 	      else if (strcmp(name,"SmbMeanULW")==0) return SmbMeanULWEnum;
 	      else if (strcmp(name,"SmbMelt")==0) return SmbMeltEnum;
@@ -752,8 +773,5 @@
 	      else if (strcmp(name,"SmbRefreeze")==0) return SmbRefreezeEnum;
 	      else if (strcmp(name,"SmbReini")==0) return SmbReiniEnum;
-         else stage=7;
-   }
-   if(stage==7){
-	      if (strcmp(name,"SmbRunoff")==0) return SmbRunoffEnum;
+	      else if (strcmp(name,"SmbRunoff")==0) return SmbRunoffEnum;
 	      else if (strcmp(name,"SmbRunoffSubstep")==0) return SmbRunoffSubstepEnum;
 	      else if (strcmp(name,"SmbRunoffTransient")==0) return SmbRunoffTransientEnum;
@@ -778,4 +796,5 @@
 	      else if (strcmp(name,"SmbVz")==0) return SmbVzEnum;
 	      else if (strcmp(name,"SmbW")==0) return SmbWEnum;
+	      else if (strcmp(name,"SmbWAdd")==0) return SmbWAddEnum;
 	      else if (strcmp(name,"SmbWini")==0) return SmbWiniEnum;
 	      else if (strcmp(name,"SmbZMax")==0) return SmbZMaxEnum;
@@ -805,4 +824,5 @@
 	      else if (strcmp(name,"SurfaceCrevasse")==0) return SurfaceCrevasseEnum;
 	      else if (strcmp(name,"Surface")==0) return SurfaceEnum;
+	      else if (strcmp(name,"SurfaceOld")==0) return SurfaceOldEnum;
 	      else if (strcmp(name,"SurfaceLogVelMisfit")==0) return SurfaceLogVelMisfitEnum;
 	      else if (strcmp(name,"SurfaceLogVxVyMisfit")==0) return SurfaceLogVxVyMisfitEnum;
@@ -855,5 +875,8 @@
 	      else if (strcmp(name,"Outputdefinition17")==0) return Outputdefinition17Enum;
 	      else if (strcmp(name,"Outputdefinition18")==0) return Outputdefinition18Enum;
-	      else if (strcmp(name,"Outputdefinition19")==0) return Outputdefinition19Enum;
+         else stage=8;
+   }
+   if(stage==8){
+	      if (strcmp(name,"Outputdefinition19")==0) return Outputdefinition19Enum;
 	      else if (strcmp(name,"Outputdefinition20")==0) return Outputdefinition20Enum;
 	      else if (strcmp(name,"Outputdefinition21")==0) return Outputdefinition21Enum;
@@ -875,8 +898,5 @@
 	      else if (strcmp(name,"Outputdefinition36")==0) return Outputdefinition36Enum;
 	      else if (strcmp(name,"Outputdefinition37")==0) return Outputdefinition37Enum;
-         else stage=8;
-   }
-   if(stage==8){
-	      if (strcmp(name,"Outputdefinition38")==0) return Outputdefinition38Enum;
+	      else if (strcmp(name,"Outputdefinition38")==0) return Outputdefinition38Enum;
 	      else if (strcmp(name,"Outputdefinition39")==0) return Outputdefinition39Enum;
 	      else if (strcmp(name,"Outputdefinition3")==0) return Outputdefinition3Enum;
@@ -976,5 +996,10 @@
 	      else if (strcmp(name,"BoolExternalResult")==0) return BoolExternalResultEnum;
 	      else if (strcmp(name,"BoolInput")==0) return BoolInputEnum;
-	      else if (strcmp(name,"BoolParam")==0) return BoolParamEnum;
+	      else if (strcmp(name,"BoolInput2")==0) return BoolInput2Enum;
+	      else if (strcmp(name,"IntInput2")==0) return IntInput2Enum;
+         else stage=9;
+   }
+   if(stage==9){
+	      if (strcmp(name,"BoolParam")==0) return BoolParamEnum;
 	      else if (strcmp(name,"Boundary")==0) return BoundaryEnum;
 	      else if (strcmp(name,"BuddJacka")==0) return BuddJackaEnum;
@@ -996,10 +1021,8 @@
 	      else if (strcmp(name,"Contours")==0) return ContoursEnum;
 	      else if (strcmp(name,"ControlInput")==0) return ControlInputEnum;
+	      else if (strcmp(name,"ControlInput2")==0) return ControlInput2Enum;
 	      else if (strcmp(name,"ControlInputGrad")==0) return ControlInputGradEnum;
 	      else if (strcmp(name,"ControlInputMaxs")==0) return ControlInputMaxsEnum;
-         else stage=9;
-   }
-   if(stage==9){
-	      if (strcmp(name,"ControlInputMins")==0) return ControlInputMinsEnum;
+	      else if (strcmp(name,"ControlInputMins")==0) return ControlInputMinsEnum;
 	      else if (strcmp(name,"ControlInputValues")==0) return ControlInputValuesEnum;
 	      else if (strcmp(name,"CrouzeixRaviart")==0) return CrouzeixRaviartEnum;
@@ -1011,4 +1034,5 @@
 	      else if (strcmp(name,"DataSetParam")==0) return DataSetParamEnum;
 	      else if (strcmp(name,"DatasetInput")==0) return DatasetInputEnum;
+	      else if (strcmp(name,"DatasetInput2")==0) return DatasetInput2Enum;
 	      else if (strcmp(name,"DefaultAnalysis")==0) return DefaultAnalysisEnum;
 	      else if (strcmp(name,"DefaultCalving")==0) return DefaultCalvingEnum;
@@ -1020,4 +1044,5 @@
 	      else if (strcmp(name,"Domain3Dsurface")==0) return Domain3DsurfaceEnum;
 	      else if (strcmp(name,"DoubleArrayInput")==0) return DoubleArrayInputEnum;
+	      else if (strcmp(name,"ArrayInput2")==0) return ArrayInput2Enum;
 	      else if (strcmp(name,"DoubleExternalResult")==0) return DoubleExternalResultEnum;
 	      else if (strcmp(name,"DoubleInput")==0) return DoubleInputEnum;
@@ -1096,12 +1121,20 @@
 	      else if (strcmp(name,"IcefrontMassFlux")==0) return IcefrontMassFluxEnum;
 	      else if (strcmp(name,"IcefrontMassFluxLevelset")==0) return IcefrontMassFluxLevelsetEnum;
-	      else if (strcmp(name,"Incremental")==0) return IncrementalEnum;
+         else stage=10;
+   }
+   if(stage==10){
+	      if (strcmp(name,"Incremental")==0) return IncrementalEnum;
 	      else if (strcmp(name,"Indexed")==0) return IndexedEnum;
 	      else if (strcmp(name,"IntExternalResult")==0) return IntExternalResultEnum;
 	      else if (strcmp(name,"IntInput")==0) return IntInputEnum;
+	      else if (strcmp(name,"ElementInput2")==0) return ElementInput2Enum;
+	      else if (strcmp(name,"SegInput2")==0) return SegInput2Enum;
+	      else if (strcmp(name,"TriaInput2")==0) return TriaInput2Enum;
+	      else if (strcmp(name,"PentaInput2")==0) return PentaInput2Enum;
 	      else if (strcmp(name,"IntMatExternalResult")==0) return IntMatExternalResultEnum;
 	      else if (strcmp(name,"IntMatParam")==0) return IntMatParamEnum;
 	      else if (strcmp(name,"IntParam")==0) return IntParamEnum;
 	      else if (strcmp(name,"IntVecParam")==0) return IntVecParamEnum;
+	      else if (strcmp(name,"Inputs2")==0) return Inputs2Enum;
 	      else if (strcmp(name,"Internal")==0) return InternalEnum;
 	      else if (strcmp(name,"Intersect")==0) return IntersectEnum;
@@ -1121,8 +1154,5 @@
 	      else if (strcmp(name,"LoveAnalysis")==0) return LoveAnalysisEnum;
 	      else if (strcmp(name,"LoveHi")==0) return LoveHiEnum;
-         else stage=10;
-   }
-   if(stage==10){
-	      if (strcmp(name,"LoveHr")==0) return LoveHrEnum;
+	      else if (strcmp(name,"LoveHr")==0) return LoveHrEnum;
 	      else if (strcmp(name,"LoveKernelsImag")==0) return LoveKernelsImagEnum;
 	      else if (strcmp(name,"LoveKernelsReal")==0) return LoveKernelsRealEnum;
@@ -1186,4 +1216,6 @@
 	      else if (strcmp(name,"Open")==0) return OpenEnum;
 	      else if (strcmp(name,"Option")==0) return OptionEnum;
+	      else if (strcmp(name,"Param")==0) return ParamEnum;
+	      else if (strcmp(name,"Parameters")==0) return ParametersEnum;
 	      else if (strcmp(name,"P0Array")==0) return P0ArrayEnum;
 	      else if (strcmp(name,"P0DG")==0) return P0DGEnum;
@@ -1212,5 +1244,8 @@
 	      else if (strcmp(name,"Regionaloutput")==0) return RegionaloutputEnum;
 	      else if (strcmp(name,"Regular")==0) return RegularEnum;
-	      else if (strcmp(name,"RecoveryAnalysis")==0) return RecoveryAnalysisEnum;
+         else stage=11;
+   }
+   if(stage==11){
+	      if (strcmp(name,"RecoveryAnalysis")==0) return RecoveryAnalysisEnum;
 	      else if (strcmp(name,"Riftfront")==0) return RiftfrontEnum;
 	      else if (strcmp(name,"SIAApproximation")==0) return SIAApproximationEnum;
@@ -1244,8 +1279,5 @@
 	      else if (strcmp(name,"SegInput")==0) return SegInputEnum;
 	      else if (strcmp(name,"Segment")==0) return SegmentEnum;
-         else stage=11;
-   }
-   if(stage==11){
-	      if (strcmp(name,"SegmentRiftfront")==0) return SegmentRiftfrontEnum;
+	      else if (strcmp(name,"SegmentRiftfront")==0) return SegmentRiftfrontEnum;
 	      else if (strcmp(name,"Separate")==0) return SeparateEnum;
 	      else if (strcmp(name,"Seq")==0) return SeqEnum;
@@ -1291,4 +1323,5 @@
 	      else if (strcmp(name,"TransientArrayParam")==0) return TransientArrayParamEnum;
 	      else if (strcmp(name,"TransientInput")==0) return TransientInputEnum;
+	      else if (strcmp(name,"TransientInput2")==0) return TransientInput2Enum;
 	      else if (strcmp(name,"TransientParam")==0) return TransientParamEnum;
 	      else if (strcmp(name,"TransientSolution")==0) return TransientSolutionEnum;
Index: /issm/trunk/src/c/shared/Exceptions/Exceptions.cpp
===================================================================
--- /issm/trunk/src/c/shared/Exceptions/Exceptions.cpp	(revision 24685)
+++ /issm/trunk/src/c/shared/Exceptions/Exceptions.cpp	(revision 24686)
@@ -11,4 +11,7 @@
 #include <cstring>
 #include <cstdio>
+#include <string>
+#include <iostream>
+#include <iomanip>
 #include "./exceptions.h"
 #include "../io/Print/Print.h"
Index: /issm/trunk/src/c/shared/Exceptions/exceptions.h
===================================================================
--- /issm/trunk/src/c/shared/Exceptions/exceptions.h	(revision 24685)
+++ /issm/trunk/src/c/shared/Exceptions/exceptions.h	(revision 24686)
@@ -23,4 +23,8 @@
 #error "Cannot compile with HAVE_CONFIG_H symbol! run configure first!"
 #endif
+
+/*Only include forward declaration to save compile time*/
+#include <iosfwd>
+#include <sstream>
 
 /*macros: */
Index: /issm/trunk/src/c/shared/Numerics/GaussPoints.cpp
===================================================================
--- /issm/trunk/src/c/shared/Numerics/GaussPoints.cpp	(revision 24685)
+++ /issm/trunk/src/c/shared/Numerics/GaussPoints.cpp	(revision 24686)
@@ -20,4 +20,5 @@
 		For degree p, the required number of Gauss-Legendre points is
 		n>=(p+1)/2.*/
+	_assert_(ngaus>0);
 
 	/*Intermediaries*/
@@ -98,4 +99,5 @@
 	  Symmetrical Gaussian Quadrature Rules for the Triangle", IJNME,
 	  Vol. 21, pp. 1129-1148 (1985), as transcribed for Probe rules3.*/
+	_assert_(iord>0);
 
 	/*Intermediaries*/
@@ -1218,4 +1220,5 @@
 		Quadrature Formulas", Computer Methods in Applied Mechanics and
 		Engineering, Vol. 55, pp. 339-348 (1986).*/
+	_assert_(iord>0);
 
 	/*Intermediaries*/
@@ -1484,4 +1487,5 @@
 	  For degree p, the required number of Gauss-Lobatto points is
 	  n>=(p+1)/2+1 (one more than Gauss-Legendre).*/
+	_assert_(ngaus>0);
 
 	int i;
Index: /issm/trunk/src/c/shared/Numerics/constants.h
===================================================================
--- /issm/trunk/src/c/shared/Numerics/constants.h	(revision 24685)
+++ /issm/trunk/src/c/shared/Numerics/constants.h	(revision 24686)
@@ -1,5 +1,5 @@
 /*!\file: constants.h
  * \brief prototypes for constants.h
- */ 
+ */
 
 #ifndef _ISSM_CONSTANTS_H_
@@ -9,5 +9,5 @@
 #define SQRT2 1.414213562373095048801688724209698078569671875376948073176679738
 #define SQRT3 1.732050807568877293527446341505872366942805253810380628055806979
-#define PI 3.141592653589793238462643383279502884197169399375105820974944592308
+const double PI=3.141592653589793238462643383279502884197169399375105820974944592308; // Macro definition conflicts with Dakota's declaration of PI
 
 #define NDOF1 1
@@ -18,11 +18,11 @@
 // /*Windows specific typefefs: */
 // #ifdef _INTEL_WIN_
-// 
+//
 // #ifndef NAN
 // //For reference, for Intel compile on win64
-// //#define NAN 0.0/0.0 
+// //#define NAN 0.0/0.0
 // #define NAN (INFINITY-INFINITY)
 // #endif
-// 
+//
 // #ifndef INFINITY
 // //For reference, for Intel compile on win64
@@ -30,5 +30,5 @@
 // #define INFINITY (DBL_MAX+DBL_MAX)
 // #endif
-// 
+//
 // #endif /*_INTEL_WIN_*/
 
Index: /issm/trunk/src/c/shared/io/Marshalling/IoCodeConversions.cpp
===================================================================
--- /issm/trunk/src/c/shared/io/Marshalling/IoCodeConversions.cpp	(revision 24685)
+++ /issm/trunk/src/c/shared/io/Marshalling/IoCodeConversions.cpp	(revision 24686)
@@ -173,7 +173,7 @@
 		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
 	}
-	else if(strcmp(string_in,"SealevelriseStericRate")==0){
-		const char* field = "md.slr.steric_rate";
-		input_enum        = SealevelriseStericRateEnum;
+	else if(strcmp(string_in,"DslGlobalAverageThermostericSeaLevelChange")==0){
+		const char* field = "md.dsl.global_average_thermosteric_sea_level_change";
+		input_enum        = DslGlobalAverageThermostericSeaLevelChangeEnum;
 		fieldname=xNew<char>((strlen(field)+1)); xMemCpy<char>(fieldname,field,(strlen(field)+1));
 	}
Index: /issm/trunk/src/c/shared/io/Print/Print.h
===================================================================
--- /issm/trunk/src/c/shared/io/Print/Print.h	(revision 24685)
+++ /issm/trunk/src/c/shared/io/Print/Print.h	(revision 24686)
@@ -12,8 +12,7 @@
 #endif 
 
-#include <string>
-#include <iostream>
+/*Only include forward declaration to save compile time*/
+#include <iosfwd>
 #include <sstream>
-#include <iomanip>
 
 using namespace std;
Index: /issm/trunk/src/c/shared/io/Print/PrintfFunction.cpp
===================================================================
--- /issm/trunk/src/c/shared/io/Print/PrintfFunction.cpp	(revision 24685)
+++ /issm/trunk/src/c/shared/io/Print/PrintfFunction.cpp	(revision 24686)
@@ -12,4 +12,8 @@
 #include <stdarg.h>
 #include <cstdio>
+#include <string>
+#include <iostream>
+#include <iomanip>
+
 #ifdef _HAVE_ANDROID_NDK_
 #include <android/log.h>
@@ -21,8 +25,7 @@
 
 int PrintfFunctionOnCpu0(const string & message){
-	int  my_rank;
 
 	/*recover my_rank:*/
-	my_rank=IssmComm::GetRank();
+	int my_rank=IssmComm::GetRank();
 
 	if(my_rank==0){
Index: /issm/trunk/src/c/solutionsequences/solutionsequence_nonlinear.cpp
===================================================================
--- /issm/trunk/src/c/solutionsequences/solutionsequence_nonlinear.cpp	(revision 24685)
+++ /issm/trunk/src/c/solutionsequences/solutionsequence_nonlinear.cpp	(revision 24686)
@@ -54,7 +54,12 @@
 	Reducevectorgtofx(&uf, ug, femmodel->nodes,femmodel->parameters);
 
-	//Update once again the solution to make sure that vx and vxold are similar (for next step in transient or steadystate)
+	/*Update once again the solution to make sure that vx and vxold are similar (for next step in transient or steadystate)*/
 	InputUpdateFromConstantx(femmodel,converged,ConvergedEnum);
 	InputUpdateFromSolutionx(femmodel,ug);
+
+	/*allocate the matrices once and reuse them per iteration*/
+	if(femmodel->loads->numrifts == 0){
+		AllocateSystemMatricesx(&Kff,&Kfs,&df,&pf,femmodel);
+	}
 
 	for(;;){
@@ -64,7 +69,10 @@
 		delete ug;
 
-		SystemMatricesx(&Kff,&Kfs,&pf,&df,NULL,femmodel);
+		if(femmodel->loads->numrifts){
+			AllocateSystemMatricesx(&Kff,&Kfs,&df,&pf,femmodel);
+		}
+		SystemMatricesx(&Kff,&Kfs,&pf,&df,NULL,femmodel, true);
 		CreateNodalConstraintsx(&ys,femmodel->nodes);
-		Reduceloadx(pf, Kfs, ys); delete Kfs;
+		Reduceloadx(pf, Kfs, ys);
 		femmodel->profiler->Start(SOLVER);
 		Solverx(&uf, Kff, pf, old_uf, df, femmodel->parameters);
@@ -73,7 +81,12 @@
 		Mergesolutionfromftogx(&ug, uf,ys,femmodel->nodes,femmodel->parameters);delete ys;
 
-		convergence(&converged,Kff,pf,uf,old_uf,eps_res,eps_rel,eps_abs); delete Kff; delete pf; delete df;
+		convergence(&converged,Kff,pf,uf,old_uf,eps_res,eps_rel,eps_abs);
 		InputUpdateFromConstantx(femmodel,converged,ConvergedEnum);
 		InputUpdateFromSolutionx(femmodel,ug);
+
+		/*Clean up if rifts*/
+		if(femmodel->loads->numrifts){
+			delete Kfs; delete Kff; delete pf; delete df;
+		}
 
 		ConstraintsStatex(&constraints_converged,&num_unstable_constraints,femmodel);
@@ -102,4 +115,17 @@
 			break;
 		}
+
+		/*Set the matrix entries to zero if we do an other iteration*/
+		if(femmodel->loads->numrifts==0){
+			Kff->SetZero();
+			Kfs->SetZero();
+			df->Set(0);
+			pf->Set(0);
+		}
+	}
+
+	/*delete matrices after the iteration loop*/
+	if(femmodel->loads->numrifts==0){
+		delete Kff; delete pf; delete df; delete Kfs;
 	}
 
Index: /issm/trunk/src/c/solutionsequences/solutionsequence_schurcg.cpp
===================================================================
--- /issm/trunk/src/c/solutionsequences/solutionsequence_schurcg.cpp	(revision 24685)
+++ /issm/trunk/src/c/solutionsequences/solutionsequence_schurcg.cpp	(revision 24686)
@@ -88,5 +88,5 @@
 	 * and I the Schur preconditioner (stored here, because the space was allocated either way) 
 	 *         */
-	#if _PETSC_MINOR_>10
+	#if _PETSC_MINOR_>8
 	MatCreateSubMatrix(Kff,isv,isv,MAT_INITIAL_MATRIX,&A);
 	MatCreateSubMatrix(Kff,isv,isp,MAT_INITIAL_MATRIX,&B);
@@ -99,5 +99,5 @@
 	
 	/* Extract preconditioner matrix on the pressure space*/
-	#if _PETSC_MINOR_>10
+	#if _PETSC_MINOR_>8
 	MatCreateSubMatrix(Kff,isp,isp,MAT_INITIAL_MATRIX,&IP);
 	#else
@@ -539,5 +539,5 @@
 
 	/*Extract A, B, B^T */
-	#if _PETSC_MINOR_>10
+	#if _PETSC_MINOR_>8
 	MatCreateSubMatrix(Kff->pmatrix->matrix,isv,isv,MAT_INITIAL_MATRIX,&A);
 	MatCreateSubMatrix(Kff->pmatrix->matrix,isv,isp,MAT_INITIAL_MATRIX,&B);
Index: /issm/trunk/src/c/toolkits/issm/IssmAbsMat.h
===================================================================
--- /issm/trunk/src/c/toolkits/issm/IssmAbsMat.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/issm/IssmAbsMat.h	(revision 24686)
@@ -43,4 +43,5 @@
 		virtual void SetValues(int m,int* idxm,int n,int* idxn,doubletype* values,InsMode mode)=0;
 		virtual void Convert(MatrixType type)=0;
+		virtual void SetZero(void)=0;
 		#ifndef _HAVE_WRAPPERS_
 		virtual IssmAbsVec<IssmDouble>* Solve(IssmAbsVec<IssmDouble>* pf, Parameters* parameters)=0;
Index: /issm/trunk/src/c/toolkits/issm/IssmDenseMat.h
===================================================================
--- /issm/trunk/src/c/toolkits/issm/IssmDenseMat.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/issm/IssmDenseMat.h	(revision 24686)
@@ -112,11 +112,9 @@
 
 		/*IssmAbsMat virtual functions*/
-		/*Echo{{{*/
-		void Echo(void){
-
-			int i,j;
+		void Echo(void){/*{{{*/
+
 			_printf_("IssmDenseMat size " << this->M << "-" << this->N << "\n");
-			for(i=0;i<M;i++){
-				for(j=0;j<N;j++){
+			for(int i=0;i<M;i++){
+				for(int j=0;j<N;j++){
 					_printf_(this->matrix[N*i+j] << " ");
 				}
@@ -125,6 +123,5 @@
 		}
 		/*}}}*/
-		/*Assemble{{{*/
-		void Assemble(void){
+		void Assemble(void){/*{{{*/
 
 			/*do nothing*/
@@ -132,6 +129,5 @@
 		}
 		/*}}}*/
-		/*Norm{{{*/
-		doubletype Norm(NormMode mode){
+		doubletype Norm(NormMode mode){/*{{{*/
 
 			doubletype norm;
@@ -167,14 +163,10 @@
 		}
 		/*}}}*/
-		/*GetSize{{{*/
-		void GetSize(int* pM,int* pN){
-
+		void GetSize(int* pM,int* pN){/*{{{*/
 			*pM=this->M;
 			*pN=this->N;
-
-		}
-		/*}}}*/
-		/*GetLocalSize{{{*/
-		void GetLocalSize(int* pM,int* pN){
+		}
+		/*}}}*/
+		void GetLocalSize(int* pM,int* pN){/*{{{*/
 
 			*pM=this->M;
@@ -183,6 +175,5 @@
 		}
 		/*}}}*/
-		/*MatMult{{{*/
-		void MatMult(IssmAbsVec<doubletype>* Xin,IssmAbsVec<doubletype>* AXin){
+		void MatMult(IssmAbsVec<doubletype>* Xin,IssmAbsVec<doubletype>* AXin){/*{{{*/
 
 			/*We assume that the vectors coming in are of compatible type: */
@@ -214,6 +205,5 @@
 		}
 		/*}}}*/
-		/*Duplicate{{{*/
-		IssmDenseMat<doubletype>* Duplicate(void){
+		IssmDenseMat<doubletype>* Duplicate(void){/*{{{*/
 
 			doubletype dummy=0;
@@ -223,6 +213,5 @@
 		}
 		/*}}}*/
-		/*ToSerial{{{*/
-		doubletype* ToSerial(void){
+		doubletype* ToSerial(void){/*{{{*/
 
 			doubletype* buffer=NULL;
@@ -236,6 +225,5 @@
 		}
 		/*}}}*/
-		/*SetValues{{{*/
-		void SetValues(int m,int* idxm,int n,int* idxn,doubletype* values,InsMode mode){
+		void SetValues(int m,int* idxm,int n,int* idxn,doubletype* values,InsMode mode){/*{{{*/
 
 			int i,j;
@@ -254,6 +242,5 @@
 		}
 		/*}}}*/
-		/*Convert{{{*/
-		void Convert(MatrixType type){
+		void Convert(MatrixType type){/*{{{*/
 
 			/*do nothing*/
@@ -261,7 +248,13 @@
 		}
 		/*}}}*/		
+		void SetZero(void){/*{{{*/
+			for(int i=0;i<M;i++){
+				for(int j=0;j<N;j++){
+					this->matrix[N*i+j] = 0.;
+				}
+			}
+		}/*}}}*/
 		#ifndef _HAVE_WRAPPERS_
-		/*Solve{{{*/
-		IssmAbsVec<IssmDouble>* Solve(IssmAbsVec<IssmDouble>* pfin, Parameters* parameters){
+		IssmAbsVec<IssmDouble>* Solve(IssmAbsVec<IssmDouble>* pfin, Parameters* parameters){/*{{{*/
 
 			/*First off, we assume that the type of IssmAbsVec is IssmSeqVec. So downcast: */
Index: /issm/trunk/src/c/toolkits/issm/IssmMat.h
===================================================================
--- /issm/trunk/src/c/toolkits/issm/IssmMat.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/issm/IssmMat.h	(revision 24686)
@@ -239,4 +239,7 @@
 			matrix->convert(type);
 		}/*}}}*/
+		void SetZero(void){/*{{{*/
+			matrix->SetZero();
+		}/*}}}*/
 		#ifndef _HAVE_WRAPPERS_
 		IssmVec<doubletype>* Solve(IssmVec<doubletype>* pf, Parameters* parameters){ /*{{{*/
Index: /issm/trunk/src/c/toolkits/issm/IssmMpiDenseMat.h
===================================================================
--- /issm/trunk/src/c/toolkits/issm/IssmMpiDenseMat.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/issm/IssmMpiDenseMat.h	(revision 24686)
@@ -400,4 +400,7 @@
 		}
 		/*}}}*/
+		void SetZero(void){/*{{{*/
+			for(int i=0;i<this->m*this->N;i++) this->matrix[i] = 0.;
+		}/*}}}*/
 		void Convert(MatrixType type){/*{{{*/
 			_error_("not supported yet!");
Index: /issm/trunk/src/c/toolkits/issm/IssmMpiSparseMat.h
===================================================================
--- /issm/trunk/src/c/toolkits/issm/IssmMpiSparseMat.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/issm/IssmMpiSparseMat.h	(revision 24686)
@@ -91,6 +91,4 @@
 		void Init(int Min,int Nin){/*{{{*/
 			
-			int i;
-
 			this->buckets=new DataSet();
 
@@ -107,5 +105,5 @@
 			if (m*N){
 				this->matrix=xNew<SparseRow<doubletype>*>(m);
-				for(i=0;i<m;i++){
+				for(int i=0;i<m;i++){
 					this->matrix[i]=new SparseRow<doubletype>(N);
 				}
@@ -114,15 +112,13 @@
 		/*}}}*/
 		~IssmMpiSparseMat(){/*{{{*/
-			int i;
-
 			if(m*N){
-				for(i=0;i<m;i++){
+				for(int i=0;i<m;i++){
 					delete this->matrix[i];
 				}
 				xDelete<SparseRow<doubletype>*>(this->matrix);
 			}
-			M=0;
-			N=0;
-			m=0;
+			this->M=0;
+			this->N=0;
+			this->m=0;
 			delete this->buckets;
 		}
@@ -421,4 +417,21 @@
 		}
 		/*}}}*/
+		void SetZero(void){/*{{{*/
+
+			/*Reset buckets*/
+			delete this->buckets;
+			this->buckets=new DataSet();
+
+			/*reset matrix*/
+			if(m*N){
+				for(int i=0;i<m;i++) delete this->matrix[i];
+				xDelete<SparseRow<doubletype>*>(this->matrix);
+
+				this->matrix=xNew<SparseRow<doubletype>*>(m);
+				for(int i=0;i<m;i++) this->matrix[i]=new SparseRow<doubletype>(N);
+			}
+
+			/*Reallocate matrix*/
+		}/*}}}*/
 		void Convert(MatrixType type){/*{{{*/
 			_error_("not supported yet!");
Index: /issm/trunk/src/c/toolkits/issm/IssmMpiVec.h
===================================================================
--- /issm/trunk/src/c/toolkits/issm/IssmMpiVec.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/issm/IssmMpiVec.h	(revision 24686)
@@ -380,6 +380,5 @@
 		void Set(doubletype value){/*{{{*/
 
-			int i;
-			for(i=0;i<this->m;i++)this->vector[i]=value;
+			for(int i=0;i<this->m;i++)this->vector[i]=value;
 
 		}
Index: /issm/trunk/src/c/toolkits/issm/IssmSeqVec.h
===================================================================
--- /issm/trunk/src/c/toolkits/issm/IssmSeqVec.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/issm/IssmSeqVec.h	(revision 24686)
@@ -173,6 +173,5 @@
 		void Set(doubletype value){/*{{{*/
 
-			int i;
-			for(i=0;i<this->M;i++)this->vector[i]=value;
+			for(int i=0;i<this->M;i++)this->vector[i]=value;
 
 		}
Index: /issm/trunk/src/c/toolkits/metis/patches/METIS_PartMeshNodalPatch.cpp
===================================================================
--- /issm/trunk/src/c/toolkits/metis/patches/METIS_PartMeshNodalPatch.cpp	(revision 24685)
+++ /issm/trunk/src/c/toolkits/metis/patches/METIS_PartMeshNodalPatch.cpp	(revision 24686)
@@ -7,53 +7,70 @@
 #include "../../../shared/shared.h"
 
-void METIS_PartMeshNodalPatch(int* pnumberofelements,int* pnumberofnodes, int* index, int* petype, int* pnumflag, int* pnum_procs, int* pedgecut, int* epart, int* npart){
+/*METIS prototypes*/
+extern "C" {
+#if _METIS_VERSION_ == 4
+	void METIS_PartMeshNodal(int *, int *, idxtype *, int *, int *, int *, int *, idxtype *, idxtype *);
+#endif
+#if _METIS_VERSION_ == 5
+	int METIS_PartMeshNodal(idx_t*, idx_t*, idx_t*, idx_t*, idx_t*, idx_t*, idx_t*, real_t*, idx_t*, idx_t*, idx_t*, idx_t*);
+	int METIS_SetDefaultOptions(idx_t *options);
+#endif
+}
+
+void METIS_PartMeshNodalPatch(int numberofelements,int numberofnodes,int* index,int* vweights,int num_procs,int* epart,int* npart){
 
 	#if _METIS_VERSION_ == 4
 	/*Our interface originates in the Metis 4.0 version, hence identical calls*/
-	METIS_PartMeshNodal(pnumberofelements,pnumberofnodes, index, petype, pnumflag, pnum_procs, pedgecut, epart, npart); 
+	int  edgecut=1;
+	int  etype  =1; //tria mesh see metis/Programs/Io.c
+	int  numflag=0;
+	METIS_PartMeshNodal(&numberofelements,&numberofnodes, index,&etype,&numflag,&num_procs,&edgecut, epart, npart); 
+
 	#elif _METIS_VERSION_ == 5
-	/*This interface is heavily changed. More options, different ways of meshing, etc ...: */ 
-	int i;
 
+	/*Create options*/
 	idx_t options[METIS_NOPTIONS];
-	idx_t objval;
-	idx_t* eptr=NULL;
-	idx_t  k=0;
-	real_t* tpwgts=NULL;
-
-	/*setup options: */
 	METIS_SetDefaultOptions(options);
 
-	options[METIS_OPTION_PTYPE]   = 1;
-	options[METIS_OPTION_OBJTYPE] = 0;
-	options[METIS_OPTION_CTYPE]   = 1;
-	options[METIS_OPTION_IPTYPE]  = 4;
-	options[METIS_OPTION_RTYPE]   = 1;
-	options[METIS_OPTION_DBGLVL]  = 0;
-	options[METIS_OPTION_UFACTOR] = 30;
-	options[METIS_OPTION_MINCONN] = 0;
-	options[METIS_OPTION_CONTIG]  = 0;
-	options[METIS_OPTION_SEED]    = -1;
-	options[METIS_OPTION_NITER]   = 10;
-	options[METIS_OPTION_NCUTS]   = 1;
+	options[METIS_OPTION_PTYPE]   = METIS_PTYPE_KWAY;     /* partitioning method  */
+	options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT;    /* type of objective */
+	options[METIS_OPTION_CTYPE]   = METIS_CTYPE_SHEM;     /* matching scheme to be used during coarsening.*/
+	options[METIS_OPTION_IPTYPE]  = METIS_IPTYPE_METISRB; /* algorithm used during initial partitioning*/
+	options[METIS_OPTION_RTYPE]   = METIS_RTYPE_GREEDY;   /* algorithm used for refinement*/
+	options[METIS_OPTION_DBGLVL]  = 0;                    /* amount of progress/debugging information will be printed */
+	options[METIS_OPTION_UFACTOR] = 30;                   /* maximum allowed load imbalance among the partitions*/
+	options[METIS_OPTION_MINCONN] = 0;                    /* explicitly minimize the maximum connectivity ?*/
+	options[METIS_OPTION_CONTIG]  = 0;                    /* force contiguous partitions?*/
+	options[METIS_OPTION_SEED]    = -1;                   /* seed for the random number generator*/
+	options[METIS_OPTION_NITER]   = 10;                   /* number of iterations for the refinement algorithms*/
+	options[METIS_OPTION_NCUTS]   = 1;                    /* number of different partitionings that it will compute*/
 
-	/*create eptr: */
-	eptr=xNew<idx_t>((*pnumberofelements+1));
+	/*create eptr*/
+	idx_t  k=0;
+	idx_t* eptr=xNew<idx_t>(numberofelements+1);
 	eptr[0]=0;
-	for(i=0;i<*pnumberofelements;i++){
+	for(int i=0;i<numberofelements;i++){
 		k+=3;
 		eptr[i+1]=k;
 	}
 
-	/*create tpwgts: */
-	tpwgts=xNew<real_t>(*pnum_procs);
-	for(i=0;i<*pnum_procs;i++){
-		tpwgts[i]=1.0/(*pnum_procs);
+	/*create tpwgts (Weight per processor)*/
+	real_t* tpwgts=xNew<real_t>(num_procs);
+	for(int i=0;i<num_procs;i++) tpwgts[i]=1.0/(num_procs);
+
+	/*create vwgt (Weight per node)*/
+	idx_t* vwgts=NULL;
+	if(vweights){
+		vwgts=xNew<idx_t>(numberofnodes);
+		for(int i=0;i<numberofnodes;i++) vwgts[i]=reCast<idx_t>(vweights[i]);
 	}
 
-	METIS_PartMeshNodal(pnumberofelements,pnumberofnodes, eptr, index,
-			NULL, NULL, pnum_procs, tpwgts, options, &objval,epart, npart);
+	/*Call METIS*/
+	idx_t objval;
+	int output = METIS_PartMeshNodal(&numberofelements,&numberofnodes,eptr,index,vwgts,NULL,&num_procs,tpwgts,options,&objval,epart,npart);
+	if(output!=METIS_OK) _error_("Could not partition mesh");
 
 	/*clean-up*/
+	xDelete<idx_t>(vwgts);
 	xDelete<idx_t>(eptr);
 	xDelete<real_t>(tpwgts);
Index: /issm/trunk/src/c/toolkits/metis/patches/metispatches.h
===================================================================
--- /issm/trunk/src/c/toolkits/metis/patches/metispatches.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/metis/patches/metispatches.h	(revision 24686)
@@ -12,17 +12,5 @@
 #endif
 
-void METIS_PartMeshNodalPatch(int *, int *, int *, int *, int *, int *, int *, int *, int *); //Common interface we are using in ISSM.
-
-extern "C" {
-
-#if _METIS_VERSION_ == 4
-void METIS_PartMeshNodal(int *, int *, idxtype *, int *, int *, int *, int *, idxtype *, idxtype *);
-#endif
-#if _METIS_VERSION_ == 5
-int METIS_PartMeshNodal(idx_t*, idx_t*, idx_t*, idx_t*, idx_t*, idx_t*, idx_t*, real_t*, idx_t*, idx_t*, idx_t*, idx_t*);
-int METIS_SetDefaultOptions(idx_t *options);
-#endif
-
-}
+void METIS_PartMeshNodalPatch(int numberofelements,int numberofnodes,int* index,int* vweights,int num_procs,int* epart,int* npart);
 
 #endif
Index: /issm/trunk/src/c/toolkits/mpi/issmmpi.h
===================================================================
--- /issm/trunk/src/c/toolkits/mpi/issmmpi.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/mpi/issmmpi.h	(revision 24686)
@@ -1,4 +1,4 @@
 /* \file issmmpi.h
- * \brief: header file that defines all the mpi wrappers that ISSM requires. The goal is to control 
+ * \brief: header file that defines all the mpi wrappers that ISSM requires. The goal is to control
  * which MPI layer we are using at compile time: the standard mpi, the autodiff mpi or no mpi at all.
  */
@@ -39,7 +39,7 @@
 			#include <ampi/ampi.h>
 		#endif
-	#elif  _HAVE_PETSC_MPI_ // Petsc now hides there MPI header. It can be reached through Petsc.
+	#elif  _HAVE_PETSC_MPI_ // PETSc now hides their MPI header. It can be reached through PETSc's header file.
 		#include <petsc.h>
-	#else 
+	#else
 		#include <mpi.h>
 	#endif
@@ -66,5 +66,5 @@
 	#define ISSM_MPI_INT           AMPI_INT
 	#define ISSM_MPI_LONG_LONG_INT AMPI_LONG_LONG_INT
-	
+
 	// operations
 	#define ISSM_MPI_MAX        AMPI_MAX
@@ -72,5 +72,5 @@
 	#define ISSM_MPI_PROD       AMPI_PROD
 	#define ISSM_MPI_SUM        AMPI_SUM
-	
+
 	// others
 	#define ISSM_MPI_COMM_WORLD    AMPI_COMM_WORLD
@@ -82,5 +82,5 @@
 		#if defined(_HAVE_AMPI_) && !defined(_WRAPPERS_)
 			#define ISSM_MPI_DOUBLE    AMPI_ADOUBLE
-		#else 
+		#else
 			#define ISSM_MPI_DOUBLE    MPI_DOUBLE
 		#endif
@@ -106,5 +106,5 @@
 	#include "./commops/commops.h"
 	/*}}}*/
-#else  
+#else
 	/*Our ISSM MPI defines: {{{*/
 	// types
@@ -134,10 +134,10 @@
 	#define ISSM_MPI_ANY_SOURCE    3
 	/*}}}*/
-#endif 
+#endif
 
 /*Dynamically return ISSM_MPI type from variable type */
 template <class T> ISSM_MPI_Datatype TypeToMPIType(){assert(false);};
 template <> inline ISSM_MPI_Datatype TypeToMPIType<IssmDouble>(){return ISSM_MPI_DOUBLE;};
-#if defined(_HAVE_AD_) && !defined(_WRAPPERS_) 
+#if defined(_HAVE_AD_) && !defined(_WRAPPERS_)
 template <> inline ISSM_MPI_Datatype TypeToMPIType<IssmPDouble>(){return ISSM_MPI_PDOUBLE;};
 #endif
@@ -154,5 +154,5 @@
 #ifdef _HAVE_MPI_
 # ifdef _HAVE_AMPI_
-	rc=AMPI_Bcast(buffer, 
+	rc=AMPI_Bcast(buffer,
 				count,
 				datatype,
@@ -160,5 +160,5 @@
 				comm);
 # else
-	rc=MPI_Bcast(buffer, 
+	rc=MPI_Bcast(buffer,
 				count,
 				datatype,
@@ -166,6 +166,6 @@
 				comm);
 # endif
-#else 
-	// nothing to be done here 
+#else
+	// nothing to be done here
 #endif
 	return rc;
@@ -173,6 +173,6 @@
 /* interfaces  {{{*/
 int ISSM_MPI_Allgather(void *sendbuf, int sendcount, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcount, ISSM_MPI_Datatype recvtype, ISSM_MPI_Comm comm);
-int ISSM_MPI_Allgatherv(void *sendbuf, int sendcount, ISSM_MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs, ISSM_MPI_Datatype recvtype, ISSM_MPI_Comm comm); 
-int ISSM_MPI_Allreduce(void *sendbuf, void *recvbuf, int count, ISSM_MPI_Datatype datatype, ISSM_MPI_Op op, ISSM_MPI_Comm comm); 
+int ISSM_MPI_Allgatherv(void *sendbuf, int sendcount, ISSM_MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs, ISSM_MPI_Datatype recvtype, ISSM_MPI_Comm comm);
+int ISSM_MPI_Allreduce(void *sendbuf, void *recvbuf, int count, ISSM_MPI_Datatype datatype, ISSM_MPI_Op op, ISSM_MPI_Comm comm);
 int ISSM_MPI_Barrier(ISSM_MPI_Comm comm);
 int ISSM_MPI_Bcast(void *buffer, int count, ISSM_MPI_Datatype datatype, int root, ISSM_MPI_Comm comm);
@@ -186,6 +186,6 @@
 int ISSM_MPI_Recv(void *buf, int count, ISSM_MPI_Datatype datatype, int source, int tag, ISSM_MPI_Comm comm, ISSM_MPI_Status *status);
 int ISSM_MPI_Reduce(void *sendbuf, void *recvbuf, int count, ISSM_MPI_Datatype datatype, ISSM_MPI_Op op, int root, ISSM_MPI_Comm comm);
-int ISSM_MPI_Scatter(void *sendbuf, int sendcnt, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm); 
-int ISSM_MPI_Scatterv(void *sendbuf, int *sendcnts, int *displs, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm); 
+int ISSM_MPI_Scatter(void *sendbuf, int sendcnt, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm);
+int ISSM_MPI_Scatterv(void *sendbuf, int *sendcnts, int *displs, ISSM_MPI_Datatype sendtype, void *recvbuf, int recvcnt, ISSM_MPI_Datatype recvtype, int root, ISSM_MPI_Comm comm);
 int ISSM_MPI_Send(void *buf, int count, ISSM_MPI_Datatype datatype, int dest, int tag, ISSM_MPI_Comm comm);
 double ISSM_MPI_Wtime(void);
@@ -194,5 +194,5 @@
 
 // special for Adol-C locations when buffers are allocated with new
-// this could end up in the xNew template specialized for adoubles 
+// this could end up in the xNew template specialized for adoubles
 // so as to not litter the code with it.
 void ISSM_MPI_ContiguousInAdolc(size_t aSize);
Index: /issm/trunk/src/c/toolkits/objects/Matrix.h
===================================================================
--- /issm/trunk/src/c/toolkits/objects/Matrix.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/objects/Matrix.h	(revision 24686)
@@ -307,5 +307,18 @@
 		}
 		/*}}}*/
-
+		/*
+		* sets all values to 0 but keeps the structure of a sparse matrix
+		*/
+		void SetZero(void) {/*{{{*/
+			if(type==PetscMatType){
+				#ifdef _HAVE_PETSC_
+				this->pmatrix->SetZero();
+				#endif
+			}
+			else{
+				this->imatrix->SetZero();
+			}
+		}
+		/*}}}*/
 };
 
Index: /issm/trunk/src/c/toolkits/petsc/objects/PetscMat.cpp
===================================================================
--- /issm/trunk/src/c/toolkits/petsc/objects/PetscMat.cpp	(revision 24685)
+++ /issm/trunk/src/c/toolkits/petsc/objects/PetscMat.cpp	(revision 24686)
@@ -193,2 +193,6 @@
 }
 /*}}}*/
+void PetscMat::SetZero(void){/*{{{*/
+	MatZeroEntries(this->matrix);
+}
+/*}}}*/
Index: /issm/trunk/src/c/toolkits/petsc/objects/PetscMat.h
===================================================================
--- /issm/trunk/src/c/toolkits/petsc/objects/PetscMat.h	(revision 24685)
+++ /issm/trunk/src/c/toolkits/petsc/objects/PetscMat.h	(revision 24686)
@@ -52,4 +52,5 @@
 		void SetValues(int m,int* idxm,int n,int* idxn,IssmDouble* values,InsMode mode);
 		void Convert(MatrixType type);
+		void SetZero(void);
 };
 
Index: /issm/trunk/src/m/Makefile.am
===================================================================
--- /issm/trunk/src/m/Makefile.am	(revision 24685)
+++ /issm/trunk/src/m/Makefile.am	(revision 24686)
@@ -3,77 +3,94 @@
 #find . -type d -exec ls -d {} \;
 
-bin_SCRIPTS = 
+bin_SCRIPTS =
 if WRAPPERS
 if MATLAB
 if !DEVELOPMENT
-bin_SCRIPTS += ${ISSM_DIR}/src/m/qmu/*.m \
-					${ISSM_DIR}/src/m/archive/*.m \
-					${ISSM_DIR}/src/m/qmu/setupdesign/*.m \
-					${ISSM_DIR}/src/m/qmu/plot/*.m \
-					${ISSM_DIR}/src/m/qmu/examples/*.m \
-					${ISSM_DIR}/src/m/kml/*.m \
-					${ISSM_DIR}/src/m/dev/issmversion.m \
-					${ISSM_DIR}/src/m/classes/*.m \
-					${ISSM_DIR}/src/m/classes/qmu/*.m \
-					${ISSM_DIR}/src/m/classes/qmu/@dakota_method/*.m \
-					${ISSM_DIR}/src/m/classes/clusters/*.m \
-					${ISSM_DIR}/src/m/consistency/*.m \
-					${ISSM_DIR}/src/m/array/*.m \
-					${ISSM_DIR}/src/m/boundaryconditions/*.m \
-					${ISSM_DIR}/src/m/exp/*.m \
-					${ISSM_DIR}/src/m/exp/operation/*.m \
-					${ISSM_DIR}/src/m/geometry/*.m \
-					${ISSM_DIR}/src/m/interp/*.m \
-					${ISSM_DIR}/src/m/coordsystems/*.m \
-					${ISSM_DIR}/src/m/mech/*.m \
-					${ISSM_DIR}/src/m/mesh/*.m \
-					${ISSM_DIR}/src/m/mesh/planet/spheretri/*.m \
-					${ISSM_DIR}/src/m/mesh/planet/gmsh/*.m \
-					${ISSM_DIR}/src/m/mesh/rifts/*.m \
-					${ISSM_DIR}/src/m/miscellaneous/*.m \
-					${ISSM_DIR}/src/m/modules/*.m \
-					${ISSM_DIR}/src/m/os/*.m \
-					${ISSM_DIR}/src/m/plot/*.m \
-					${ISSM_DIR}/src/m/plot/colormaps/*.m \
-					${ISSM_DIR}/src/m/string/*.m \
-					${ISSM_DIR}/src/m/extrusion/*.m \
-					${ISSM_DIR}/src/m/inversions/*.m \
-					${ISSM_DIR}/src/m/io/*.m \
-					${ISSM_DIR}/src/m/parameterization/*.m \
-					${ISSM_DIR}/src/m/partition/*.m \
-					${ISSM_DIR}/src/m/print/*.m \
-					${ISSM_DIR}/src/m/regional/*.m \
-					${ISSM_DIR}/src/m/solve/*.m \
-					${ISSM_DIR}/src/m/solvers/*.m \
-					${ISSM_DIR}/src/m/materials/*.m
+# TODO: There should be a better way of doing this so that manual updating is
+#		not required when a new subdirectory or module is introduced (search
+#		all subdirectories for file type, checking against an exclude list)
+bin_SCRIPTS += \
+	${ISSM_DIR}/src/m/archive/*.m \
+	${ISSM_DIR}/src/m/array/*.m \
+	${ISSM_DIR}/src/m/boundaryconditions/*.m \
+	${ISSM_DIR}/src/m/classes/*.m \
+	${ISSM_DIR}/src/m/classes/clusters/*.m \
+	${ISSM_DIR}/src/m/classes/qmu/*.m \
+	${ISSM_DIR}/src/m/classes/qmu/dakota_method/*.m \
+	${ISSM_DIR}/src/m/consistency/*.m \
+	${ISSM_DIR}/src/m/coordsystems/*.m \
+	${ISSM_DIR}/src/m/dev/issmversion.m \
+	${ISSM_DIR}/src/m/exp/*.m \
+	${ISSM_DIR}/src/m/exp/operation/*.m \
+	${ISSM_DIR}/src/m/extrusion/*.m \
+	${ISSM_DIR}/src/m/geometry/*.m \
+	${ISSM_DIR}/src/m/interp/*.m \
+	${ISSM_DIR}/src/m/inversions/*.m \
+	${ISSM_DIR}/src/m/io/*.m \
+	${ISSM_DIR}/src/m/kml/*.m \
+	${ISSM_DIR}/src/m/materials/*.m \
+	${ISSM_DIR}/src/m/mech/*.m \
+	${ISSM_DIR}/src/m/mesh/*.m \
+	${ISSM_DIR}/src/m/mesh/planet/gmsh/*.m \
+	${ISSM_DIR}/src/m/mesh/planet/spheretri/*.m \
+	${ISSM_DIR}/src/m/mesh/rifts/*.m \
+	${ISSM_DIR}/src/m/miscellaneous/*.m \
+	${ISSM_DIR}/src/m/modules/*.m \
+	${ISSM_DIR}/src/m/os/*.m \
+	${ISSM_DIR}/src/m/parameterization/*.m \
+	${ISSM_DIR}/src/m/partition/*.m \
+	${ISSM_DIR}/src/m/plot/*.m \
+	${ISSM_DIR}/src/m/plot/colormaps/*.m \
+	${ISSM_DIR}/src/m/print/*.m \
+	${ISSM_DIR}/src/m/qmu/*.m \
+	${ISSM_DIR}/src/m/qmu/examples/*.m \
+	${ISSM_DIR}/src/m/qmu/plot/*.m \
+	${ISSM_DIR}/src/m/qmu/setupdesign/*.m \
+	${ISSM_DIR}/src/m/regional/*.m \
+	${ISSM_DIR}/src/m/solve/*.m \
+	${ISSM_DIR}/src/m/solvers/*.m \
+	${ISSM_DIR}/src/m/string/*.m
 endif
 endif
+
 if PYTHON
 if !DEVELOPMENT
-bin_SCRIPTS += ${ISSM_DIR}/src/m/archive/*.py \
-					${ISSM_DIR}/src/m/classes/*.py \
-					${ISSM_DIR}/src/m/classes/clusters/*.py \
-					${ISSM_DIR}/src/m/consistency/*.py \
-					${ISSM_DIR}/src/m/dev/issmversion.py \
-					${ISSM_DIR}/src/m/boundaryconditions/*.py \
-					${ISSM_DIR}/src/m/exp/*.py \
-					${ISSM_DIR}/src/m/geometry/*.py \
-					${ISSM_DIR}/src/m/coordsystems/*.py \
-					${ISSM_DIR}/src/m/interp/*.py \
-					${ISSM_DIR}/src/m/inversions/*.py \
-					${ISSM_DIR}/src/m/mech/*.py \
-					${ISSM_DIR}/src/m/mesh/*.py \
-					${ISSM_DIR}/src/m/mesh/rifts/*.py \
-					${ISSM_DIR}/src/m/mesh/planet/gmsh/*.py \
-					${ISSM_DIR}/src/m/miscellaneous/*.py \
-					${ISSM_DIR}/src/m/modules/*.py \
-					${ISSM_DIR}/src/m/os/*.py \
-					${ISSM_DIR}/src/m/plot/*.py \
-					${ISSM_DIR}/src/m/extrusion/*.py \
-					${ISSM_DIR}/src/m/io/*.py \
-					${ISSM_DIR}/src/m/parameterization/*.py \
-					${ISSM_DIR}/src/m/solve/*.py \
-					${ISSM_DIR}/src/m/solvers/*.py \
-					${ISSM_DIR}/src/m/materials/*.py
+# TODO: There should be a better way of doing this so that manual updating is
+#		not required when a new subdirectory or module is introduced (search
+#		all subdirectories for file type, checking against an exclude list)
+bin_SCRIPTS += \
+	${ISSM_DIR}/src/m/archive/*.py \
+	${ISSM_DIR}/src/m/array/*.py \
+	${ISSM_DIR}/src/m/boundaryconditions/*.py \
+	${ISSM_DIR}/src/m/classes/*.py \
+	${ISSM_DIR}/src/m/classes/clusters/*.py \
+	${ISSM_DIR}/src/m/classes/qmu/*.py \
+	${ISSM_DIR}/src/m/classes/qmu/dakota_method/*.py \
+	${ISSM_DIR}/src/m/consistency/*.py \
+	${ISSM_DIR}/src/m/coordsystems/*.py \
+	${ISSM_DIR}/src/m/dev/issmversion.py \
+	${ISSM_DIR}/src/m/exp/*.py \
+	${ISSM_DIR}/src/m/extrusion/*.py \
+	${ISSM_DIR}/src/m/geometry/*.py \
+	${ISSM_DIR}/src/m/interp/*.py \
+	${ISSM_DIR}/src/m/inversions/*.py \
+	${ISSM_DIR}/src/m/io/*.py \
+	${ISSM_DIR}/src/m/materials/*.py \
+	${ISSM_DIR}/src/m/mech/*.py \
+	${ISSM_DIR}/src/m/mesh/*.py \
+	${ISSM_DIR}/src/m/mesh/planet/gmsh/*.py \
+	${ISSM_DIR}/src/m/mesh/rifts/*.py \
+	${ISSM_DIR}/src/m/miscellaneous/*.py \
+	${ISSM_DIR}/src/m/modules/*.py \
+	${ISSM_DIR}/src/m/os/*.py \
+	${ISSM_DIR}/src/m/parameterization/*.py \
+	${ISSM_DIR}/src/m/partition/*.py \
+	${ISSM_DIR}/src/m/plot/*.py \
+	${ISSM_DIR}/src/m/plot/colormaps/*.py \
+	${ISSM_DIR}/src/m/qmu/*.py \
+	${ISSM_DIR}/src/m/qmu/setupdesign/*.py \
+	${ISSM_DIR}/src/m/shp/*.py \
+	${ISSM_DIR}/src/m/solve/*.py \
+	${ISSM_DIR}/src/m/solvers/*.py
 endif
 endif
@@ -81,24 +98,27 @@
 if JAVASCRIPT
 if !DEVELOPMENT
-
-js_scripts = ${ISSM_DIR}/src/m/array/*.js \
-				${ISSM_DIR}/src/m/boundaryconditions/*.js \
-				${ISSM_DIR}/src/m/classes/*.js \
-                ${ISSM_DIR}/src/m/classes/clusters/*.js \
-				${ISSM_DIR}/src/m/consistency/*.js \
-				${ISSM_DIR}/src/m/exp/*.js \
-				${ISSM_DIR}/src/m/extrusion/*.js \
-				${ISSM_DIR}/src/m/geometry/*.js \
-				${ISSM_DIR}/src/m/inversions/*.js \
-				${ISSM_DIR}/src/m/io/*.js \
-				${ISSM_DIR}/src/m/materials/*.js \
-				${ISSM_DIR}/src/m/mesh/*.js \
-				${ISSM_DIR}/src/m/miscellaneous/*.js \
-				${ISSM_DIR}/src/m/parameterization/*.js \
-				${ISSM_DIR}/src/m/plot/*.js \
-				${ISSM_DIR}/src/m/print/*.js \
-				${ISSM_DIR}/src/m/shp/*.js \
-				${ISSM_DIR}/src/m/solve/*.js \
-				${ISSM_DIR}/src/m/solvers/*.js
+# TODO: There should be a better way of doing this so that manual updating is
+#		not required when a new subdirectory or module is introduced (search
+#		all subdirectories for file type, checking against an exclude list)
+js_scripts = \
+	${ISSM_DIR}/src/m/array/*.js \
+	${ISSM_DIR}/src/m/boundaryconditions/*.js \
+	${ISSM_DIR}/src/m/classes/*.js \
+	${ISSM_DIR}/src/m/classes/clusters/*.js \
+	${ISSM_DIR}/src/m/consistency/*.js \
+	${ISSM_DIR}/src/m/exp/*.js \
+	${ISSM_DIR}/src/m/extrusion/*.js \
+	${ISSM_DIR}/src/m/geometry/*.js \
+	${ISSM_DIR}/src/m/inversions/*.js \
+	${ISSM_DIR}/src/m/io/*.js \
+	${ISSM_DIR}/src/m/materials/*.js \
+	${ISSM_DIR}/src/m/mesh/*.js \
+	${ISSM_DIR}/src/m/miscellaneous/*.js \
+	${ISSM_DIR}/src/m/parameterization/*.js \
+	${ISSM_DIR}/src/m/plot/*.js \
+	${ISSM_DIR}/src/m/print/*.js \
+	${ISSM_DIR}/src/m/shp/*.js \
+	${ISSM_DIR}/src/m/solve/*.js \
+	${ISSM_DIR}/src/m/solvers/*.js
 bin_SCRIPTS +=  issm-bin.js
 
Index: /issm/trunk/src/m/classes/SMBd18opdd.m
===================================================================
--- /issm/trunk/src/m/classes/SMBd18opdd.m	(revision 24685)
+++ /issm/trunk/src/m/classes/SMBd18opdd.m	(revision 24686)
@@ -93,16 +93,6 @@
 
 				if(self.isd18opd==1)
-					md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
-					md = checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
-					%cross check that time stamp values are between 0 and 1:
-					i=find(self.temperatures_presentday(end,:)-self.precipitations_presentday(end,:)~=0);
-					if length(i),
-						error('Smb checkconsistency fail: timestamp (last row of matrix) for present day temp and precip must be equal!');
-					end
-					j=find(self.temperatures_presentday(end,:)<0 | self.temperatures_presentday(end,:)>1);
-					if length(j),
-						error('Smb checkconsistency fail: timestamp (last row of matrix) for present day temp and precip must be between 0 and 1!');
-					end
-
+					md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1,'timeseries',1);
+					md = checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1,'timeseries',1);
 					md = checkfield(md,'fieldname','smb.delta18o','NaN',1,'Inf',1,'size',[2,NaN],'singletimeseries',1);
 					md = checkfield(md,'fieldname','smb.dpermil','>=',0,'numel',1);
Index: /issm/trunk/src/m/classes/SMBd18opdd.py
===================================================================
--- /issm/trunk/src/m/classes/SMBd18opdd.py	(revision 24685)
+++ /issm/trunk/src/m/classes/SMBd18opdd.py	(revision 24686)
@@ -141,6 +141,6 @@
                 multt = np.ceil(lent / 12.) * 12.
                 multp = np.ceil(lenp / 12.) * 12.
-                md = checkfield(md, 'fieldname', 'smb.temperatures_presentday', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.precipitations_presentday', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
+                md = checkfield(md, 'fieldname', 'smb.temperatures_presentday', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
+                md = checkfield(md, 'fieldname', 'smb.precipitations_presentday', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
 
                 if self.istemperaturescaled == 0:
Index: /issm/trunk/src/m/classes/SMBgradientscomponents.m
===================================================================
--- /issm/trunk/src/m/classes/SMBgradientscomponents.m	(revision 24685)
+++ /issm/trunk/src/m/classes/SMBgradientscomponents.m	(revision 24686)
@@ -46,8 +46,8 @@
 		function	md=checkconsistency(self,md,solution,analyses) % {{{
 			if ismember('MasstransportAnalysis',analyses),
-				md = checkfield(md,'fieldname','smb.accuref','timeseries',1,'NaN',1,'Inf',1);
+				md = checkfield(md,'fieldname','smb.accuref','singletimeseries',1,'NaN',1,'Inf',1);
 				md = checkfield(md,'fieldname','smb.accualti','numel',1,'NaN',1,'Inf',1);
 				md = checkfield(md,'fieldname','smb.accugrad','numel',1,'NaN',1,'Inf',1);
-				md = checkfield(md,'fieldname','smb.runoffref','timeseries',1,'NaN',1,'Inf',1);
+				md = checkfield(md,'fieldname','smb.runoffref','singletimeseries',1,'NaN',1,'Inf',1);
 				md = checkfield(md,'fieldname','smb.runoffalti','numel',1,'NaN',1,'Inf',1);
 				md = checkfield(md,'fieldname','smb.runoffgrad','numel',1,'NaN',1,'Inf',1);
Index: /issm/trunk/src/m/classes/SMBpdd.m
===================================================================
--- /issm/trunk/src/m/classes/SMBpdd.m	(revision 24685)
+++ /issm/trunk/src/m/classes/SMBpdd.m	(revision 24686)
@@ -90,15 +90,15 @@
 					md = checkfield(md,'fieldname','smb.delta18o','NaN',1,'Inf',1,'size',[2,NaN],'singletimeseries',1);
 					md = checkfield(md,'fieldname','smb.delta18o_surface','NaN',1,'Inf',1,'size',[2,NaN],'singletimeseries',1);
-					md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
-					md = checkfield(md,'fieldname','smb.temperatures_lgm','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
-					md = checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
-					md = checkfield(md,'fieldname','smb.precipitations_lgm','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
+					md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1);
+					md = checkfield(md,'fieldname','smb.temperatures_lgm','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1);
+					md = checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1);
+					md = checkfield(md,'fieldname','smb.precipitations_lgm','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1);
 					md = checkfield(md,'fieldname','smb.Tdiff','NaN',1,'Inf',1,'size',[2,NaN],'singletimeseries',1);
 					md = checkfield(md,'fieldname','smb.sealev','NaN',1,'Inf',1,'size',[2,NaN],'singletimeseries',1);
 				elseif(self.ismungsm==1)
-					md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
-					md = checkfield(md,'fieldname','smb.temperatures_lgm','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
-					md = checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
-					md = checkfield(md,'fieldname','smb.precipitations_lgm','size',[md.mesh.numberofvertices+1 12],'NaN',1,'Inf',1,'timeseries',1);
+					md = checkfield(md,'fieldname','smb.temperatures_presentday','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1);
+					md = checkfield(md,'fieldname','smb.temperatures_lgm','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1);
+					md = checkfield(md,'fieldname','smb.precipitations_presentday','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1);
+					md = checkfield(md,'fieldname','smb.precipitations_lgm','size',[md.mesh.numberofvertices 12],'NaN',1,'Inf',1);
 					md = checkfield(md,'fieldname','smb.Pfac','NaN',1,'Inf',1,'size',[2,NaN],'singletimeseries',1);
 					md = checkfield(md,'fieldname','smb.Tdiff','NaN',1,'Inf',1,'size',[2,NaN],'singletimeseries',1);
Index: /issm/trunk/src/m/classes/SMBpdd.py
===================================================================
--- /issm/trunk/src/m/classes/SMBpdd.py	(revision 24685)
+++ /issm/trunk/src/m/classes/SMBpdd.py	(revision 24686)
@@ -143,18 +143,18 @@
                 md = checkfield(md, 'fieldname', 'smb.delta18o', 'NaN', 1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
                 md = checkfield(md, 'fieldname', 'smb.delta18o_surface', 'NaN', 1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.temperatures_presentday', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.temperatures_lgm', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.precipitations_presentday', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.precipitations_lgm', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
+                md = checkfield(md, 'fieldname', 'smb.temperatures_presentday', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
+                md = checkfield(md, 'fieldname', 'smb.temperatures_lgm', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
+                md = checkfield(md, 'fieldname', 'smb.precipitations_presentday', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
+                md = checkfield(md, 'fieldname', 'smb.precipitations_lgm', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
                 md = checkfield(md, 'fieldname', 'smb.Tdiff', 'NaN', 1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
                 md = checkfield(md, 'fieldname', 'smb.sealev', 'NaN', 1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
             elif self.ismungsm:
-                md = checkfield(md, 'fieldname', 'smb.temperatures_presentday', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.temperatures_lgm', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.precipitations_presentday', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.precipitations_lgm', 'size', [md.mesh.numberofvertices + 1, 12], 'NaN', 1, 'Inf', 1, 'timeseries', 1)
+                md = checkfield(md, 'fieldname', 'smb.temperatures_presentday', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
+                md = checkfield(md, 'fieldname', 'smb.temperatures_lgm', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
+                md = checkfield(md, 'fieldname', 'smb.precipitations_presentday', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
+                md = checkfield(md, 'fieldname', 'smb.precipitations_lgm', 'size', [md.mesh.numberofvertices, 12], 'NaN', 1, 'Inf', 1)
                 md = checkfield(md, 'fieldname', 'smb.Pfac', 'NaN', 1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.Tdiff', 'NaN', 1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
-                md = checkfield(md, 'fieldname', 'smb.sealev', 'NaN', 1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
+                md = checkfield(md, 'fieldname', 'smb.Tdiff','NaN', 1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
+                md = checkfield(md, 'fieldname', 'smb.sealev','NaN',1, 'Inf', 1, 'size', [2, np.nan], 'singletimeseries', 1)
 
         md = checkfield(md, 'fieldname', 'smb.steps_per_step', '>=', 1, 'numel', [1])
Index: /issm/trunk/src/m/classes/SMBpddSicopolis.m
===================================================================
--- /issm/trunk/src/m/classes/SMBpddSicopolis.m	(revision 24685)
+++ /issm/trunk/src/m/classes/SMBpddSicopolis.m	(revision 24686)
@@ -81,6 +81,6 @@
 				md = checkfield(md,'fieldname','smb.s0t','>=',0,'NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
 				md = checkfield(md,'fieldname','smb.rlaps','>=',0,'numel',1);
-				md = checkfield(md,'fieldname','smb.monthlytemperatures','timeseries',1,'NaN',1,'Inf',1,'size',[md.mesh.numberofvertices+1 12]);
-				md = checkfield(md,'fieldname','smb.precipitation','timeseries',1,'NaN',1,'Inf',1,'size',[md.mesh.numberofvertices+1 12]);
+				md = checkfield(md,'fieldname','smb.monthlytemperatures','timeseries',1,'NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 12]);
+				md = checkfield(md,'fieldname','smb.precipitation','timeseries',1,'NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 12]);
 
 			end
Index: /issm/trunk/src/m/classes/SMBpddSicopolis.py
===================================================================
--- /issm/trunk/src/m/classes/SMBpddSicopolis.py	(revision 24685)
+++ /issm/trunk/src/m/classes/SMBpddSicopolis.py	(revision 24686)
@@ -97,6 +97,6 @@
             md = checkfield(md, 'fieldname', 'smb.s0t', '>=', 0, 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 1])
             md = checkfield(md, 'fieldname', 'smb.rlaps', '>=', 0, 'numel', 1)
-            md = checkfield(md, 'fieldname', 'smb.monthlytemperatures', 'timeseries', 1, 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices + 1, 12])
-            md = checkfield(md, 'fieldname', 'smb.precipitation', 'timeseries', 1, 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices + 1, 12])
+            md = checkfield(md, 'fieldname', 'smb.monthlytemperatures', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 12])
+            md = checkfield(md, 'fieldname', 'smb.precipitation','NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices, 12])
 
         md = checkfield(md, 'fieldname', 'smb.steps_per_step', '>=', 1, 'numel', [1])
Index: /issm/trunk/src/m/classes/basalforcingspico.m
===================================================================
--- /issm/trunk/src/m/classes/basalforcingspico.m	(revision 24685)
+++ /issm/trunk/src/m/classes/basalforcingspico.m	(revision 24686)
@@ -9,5 +9,5 @@
 		basin_id                  = NaN;
 		maxboxcount               = 0;
-		overturning_coeff         = 0.;
+		overturning_coeff         = NaN;
 		gamma_T                   = 0.;
 		farocean_temperature      = NaN;
@@ -40,5 +40,5 @@
 		   end
 			if isnan(self.overturning_coeff)
-				self.overturning_coeff = 1e6; %m^3/s
+				self.overturning_coeff = 1e6*ones(md.mesh.numberofvertices,1); %m^3/s
 				disp('      no overturning strength set, setting value to 1e6');
 			end
@@ -66,5 +66,9 @@
 				md = checkfield(md,'fieldname','basalforcings.basin_id','Inf',1,'>=',0,'<=',md.basalforcings.num_basins,'size',[md.mesh.numberofelements 1]);
 				md = checkfield(md,'fieldname','basalforcings.maxboxcount','numel',1,'NaN',1,'Inf',1,'>',0);
-				md = checkfield(md,'fieldname','basalforcings.overturning_coeff','numel',1,'NaN',1,'Inf',1,'>',0);
+				if numel(self.overturning_coeff)==1
+					md = checkfield(md,'fieldname','basalforcings.overturning_coeff','numel',1,'NaN',1,'Inf',1,'>',0);
+				else
+					md = checkfield(md,'fieldname','basalforcings.overturning_coeff','size',[md.mesh.numberofvertices 1],'NaN',1,'Inf',1,'>',0);
+				end
 				md = checkfield(md,'fieldname','basalforcings.gamma_T','numel',1,'NaN',1,'Inf',1,'>',0);
 				md = checkfield(md,'fieldname','basalforcings.farocean_temperature','NaN',1,'Inf',1,'size',[md.basalforcings.num_basins+1 NaN]);
@@ -96,5 +100,5 @@
 			WriteData(fid,prefix,'object',self,'fieldname','num_basins','format','Integer');
 			WriteData(fid,prefix,'object',self,'fieldname','maxboxcount','format','Integer');
-			WriteData(fid,prefix,'object',self,'fieldname','overturning_coeff','format','Double');
+			WriteData(fid,prefix,'object',self,'fieldname','overturning_coeff','format','DoubleMat','mattype',1);
 			WriteData(fid,prefix,'object',self,'fieldname','gamma_T','format','Double');
 			WriteData(fid,prefix,'object',self,'fieldname','farocean_temperature','format','DoubleMat','name','md.basalforcings.farocean_temperature','timeserieslength',md.basalforcings.num_basins+1,'yts',md.constants.yts);
Index: /issm/trunk/src/m/classes/boundary.m
===================================================================
--- /issm/trunk/src/m/classes/boundary.m	(revision 24685)
+++ /issm/trunk/src/m/classes/boundary.m	(revision 24686)
@@ -82,4 +82,5 @@
 			offset=getfieldvalue(options,'offset',.1);
 			fontsize=getfieldvalue(options,'fontsize',10);
+			label=getfieldvalue(options,'label','on');
 
 			%read domain:
@@ -104,11 +105,18 @@
 			for i=1:length(domain),
 				hold on;
+				x=domain(i).x*unitmultiplier;
+				y=domain(i).y*unitmultiplier;
 				if length(x)==1,
 					p=plot(x,y,'k*'); 
-					set(p,'MarkerSize',markersize);
-					t=text(x,y,self.shpfilename,'FontSize',fontsize);
+					set(p,'MarkerSize',markersize,'Color',color);
+					if strcmpi(label,'on'),
+						t=text(x,y,self.shpfilename,'FontSize',fontsize);
+					end
 				else
 					p=plot(x,y,'k-'); 
-					text(sum(x)/length(x),sum(y)/length(y),self.shpfilename,'FontSize',fontsize);
+					set(p,'MarkerSize',markersize,'Color',color);
+					if strcmpi(label,'on'),
+						text(sum(x)/length(x),sum(y)/length(y),self.shpfilename,'FontSize',fontsize);
+					end
 				end
 				set(p,'Color',color);
Index: /issm/trunk/src/m/classes/clusters/generic.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/generic.m	(revision 24685)
+++ /issm/trunk/src/m/classes/clusters/generic.m	(revision 24686)
@@ -6,5 +6,5 @@
 
 classdef generic
-	properties (SetAccess=public) 
+	properties (SetAccess=public)
 		% {{{
 		name          = '';
@@ -75,10 +75,10 @@
 		%}}}
 		function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{
-
-			%write queuing script 
-			%what is the executable being called? 
-			executable='issm.exe';
+			% Which executable are we calling?
+			executable='issm.exe'; % default
+
 			if isdakota,
-				version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));
+				version=IssmConfig('_DAKOTA_VERSION_');
+				version=str2num(version(1:3));
 				if (version>=6),
 					executable='issm_dakota.exe';
@@ -90,5 +90,4 @@
 
 			if ~ispc(),
-
 				fid=fopen([modelname '.queue'],'w');
 				fprintf(fid,'#!%s\n',cluster.shell);
@@ -111,6 +110,6 @@
 				else
 					%Add --gen-suppressions=all to get suppression lines
-					fprintf(fid,'LD_PRELOAD=%s \\\n',cluster.valgrindlib);
-					if ismac, 
+					%fprintf(fid,'LD_PRELOAD=%s \\\n',cluster.valgrindlib); it could be deleted
+					if ismac,
 						if IssmConfig('_HAVE_MPI_'),
 							fprintf(fid,'mpiexec -np %i %s --leak-check=full --error-limit=no --dsymutil=yes --suppressions=%s %s/%s %s %s %s 2> %s.errlog >%s.outlog ',...
@@ -161,33 +160,33 @@
 		%}}}
 		function BuildQueueScriptMultipleModels(cluster,dirname,modelname,solution,dirnames,modelnames,nps) % {{{
-		
-			%some checks: 
+
+			%some checks:
 			if isempty(modelname), error('BuildQueueScriptMultipleModels error message: need a non empty model name!');end
 
-			%what is the executable being called? 
+			%what is the executable being called?
 			executable='issm_slr.exe';
 
 			if ispc(), error('BuildQueueScriptMultipleModels not support yet on windows machines');end;
-			
-			%write queuing script 
+
+			%write queuing script
 			fid=fopen([modelname '.queue'],'w');
-			
+
 			fprintf(fid,'#!%s\n',cluster.shell);
 
-			%number of cpus: 
+			%number of cpus:
 			mpistring=sprintf('mpiexec -np %i ',cluster.np);
 
-			%executable: 
+			%executable:
 			mpistring=[mpistring sprintf('%s/%s ',cluster.codepath,executable)];
-			
-			%solution name: 
+
+			%solution name:
 			mpistring=[mpistring sprintf('%s ',solution)];
 
-			%execution directory and model name: 
+			%execution directory and model name:
 			mpistring=[mpistring sprintf('%s/%s %s',cluster.executionpath,dirname,modelname)];
 
-			%inform main executable of how many icecaps, glaciers and earth models are being run: 
+			%inform main executable of how many icecaps, glaciers and earth models are being run:
 			mpistring=[mpistring sprintf(' %i ',length(dirnames))];
-			
+
 			%icecaps, glaciers and earth location, names and number of processors associated:
 			for i=1:length(dirnames),
@@ -195,10 +194,10 @@
 			end
 
-			%log files: 
+			%log files:
 			if ~cluster.interactive,
 				mpistring=[mpistring sprintf('2> %s.errlog> %s.outlog',modelname,modelname)];
 			end
 
-			%write this long string to disk: 
+			%write this long string to disk:
 			fprintf(fid,mpistring);
 			fclose(fid);
@@ -213,6 +212,6 @@
 		function BuildQueueScriptIceOcean(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota) % {{{
 
-			%write queuing script 
-			%what is the executable being called? 
+			%write queuing script
+			%what is the executable being called?
 			executable='issm_ocean.exe';
 
@@ -237,5 +236,5 @@
 		function BuildKrigingQueueScript(cluster,modelname,solution,io_gather,isvalgrind,isgprof) % {{{
 
-			%write queuing script 
+			%write queuing script
 			if ~ispc(),
 
@@ -252,5 +251,5 @@
 				else
 					%Add --gen-suppressions=all to get suppression lines
-					fprintf(fid,'LD_PRELOAD=%s \\\n',cluster.valgrindlib);
+					%fprintf(fid,'LD_PRELOAD=%s \\\n',cluster.valgrindlib); it could be deleted
 					fprintf(fid,'mpiexec -np %i %s --leak-check=full --suppressions=%s %s/kriging.exe %s %s 2> %s.errlog >%s.outlog ',...
 						cluster.np,cluster.valgrind,cluster.valgrindsup,cluster.codepath,[cluster.executionpath '/' modelname],modelname,modelname,modelname);
Index: /issm/trunk/src/m/classes/clusters/generic.py
===================================================================
--- /issm/trunk/src/m/classes/clusters/generic.py	(revision 24685)
+++ /issm/trunk/src/m/classes/clusters/generic.py	(revision 24686)
@@ -1,6 +1,5 @@
-import socket
-import os
-import math
-import subprocess
+import numpy as np
+from socket import gethostname
+from subprocess import call
 from IssmConfig import IssmConfig
 from issmdir import issmdir
@@ -9,5 +8,9 @@
 from issmscpin import issmscpin
 from issmscpout import issmscpout
-import MatlabFuncs as m
+from MatlabFuncs import ispc
+try:
+    from generic_settings import generic_settings
+except ImportError:
+    print('Warning generic_settings.py not found, default will be used')
 
 
@@ -31,5 +34,7 @@
         self.valgrind = issmdir() + '/externalpackages/valgrind/install/bin/valgrind'
         self.valgrindlib = issmdir() + '/externalpackages/valgrind/install/lib/libmpidebug.so'
-        self.valgrindsup = issmdir() + '/externalpackages/valgrind/issm.supp'
+        self.valgrindsup = [issmdir() + '/externalpackages/valgrind/issm.supp']  # add any .supp in list form as needed
+        self.verbose = 1
+        self.shell = '/bin/sh'
 
         #use provided options to change fields
@@ -37,9 +42,14 @@
 
         #get name
-        self.name = socket.gethostname()
+        self.name = gethostname()
 
         #initialize cluster using user settings if provided
-        if os.path.exists(self.name + '_settings.py'):
-            exec(compile(open(self.name + '_settings.py').read(), self.name + '_settings.py', 'exec'), globals())
+
+        try:
+            self = generic_settings(self)
+        except NameError:
+            print("generic_settings.py not found, using default settings")
+        # else:
+        #     raise
 
         #OK get other fields
@@ -59,4 +69,6 @@
         s += "    valgrindlib: %s\n" % self.valgrindlib
         s += "    valgrindsup: %s\n" % self.valgrindsup
+        s += "    verbose: %s\n" % self.verbose
+        s += "    shell: %s\n" % self.shell
         return s
     # }}}
@@ -64,13 +76,15 @@
     def checkconsistency(self, md, solution, analyses):  # {{{
         if self.np < 1:
-            md = checkmessage(md, 'number of processors should be at least 1')
-        if math.isnan(self.np):
-            md = checkmessage(md, 'number of processors should not be NaN!')
+            md.checkmessage('number of processors should be at least 1')
+        if np.isnan(self.np):
+            md.checkmessage('number of processors should not be NaN!')
 
         return md
     # }}}
+
     def BuildQueueScript(self, dirname, modelname, solution, io_gather, isvalgrind, isgprof, isdakota, isoceancoupling):  # {{{
-
-        executable = 'issm.exe'
+        # Which executable are we calling?
+        executable = 'issm.exe'  # default
+
         if isdakota:
             version = IssmConfig('_DAKOTA_VERSION_')
@@ -81,6 +95,6 @@
             executable = 'issm_ocean.exe'
 
-        #write queuing script
-        if not m.ispc():
+        # Write queueing script
+        if not ispc():
             fid = open(modelname + '.queue', 'w')
             fid.write('#!/bin/sh\n')
@@ -102,11 +116,15 @@
             else:
                 #Add --gen -suppressions = all to get suppression lines
-                fid.write('LD_PRELOAD={} \\\n'.format(self.valgrindlib))
+                #fid.write('LD_PRELOAD={} \\\n'.format(self.valgrindlib)) it could be deleted
+                supstring = ''
+                for supfile in self.valgrindsup:
+                    supstring += ' --suppressions=' + supfile
+
                 if IssmConfig('_HAVE_MPI_')[0]:
-                    fid.write('mpiexec -np {} {} --leak-check=full --suppressions={} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.
-                              format(self.np, self.valgrind, self.valgrindsup, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
+                    fid.write('mpiexec -np {} {} --leak-check=full {} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.
+                              format(self.np, self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
                 else:
-                    fid.write('{} --leak-check=full --suppressions={} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.
-                              format(self.valgrind, self.valgrindsup, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
+                    fid.write('{} --leak-check=full {} {}/{} {} {}/{} {} 2>{}.errlog>{}.outlog '.
+                              format(self.valgrind, supstring, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
 
             if not io_gather:  #concatenate the output files:
@@ -134,5 +152,5 @@
     def BuildKrigingQueueScript(self, modelname, solution, io_gather, isvalgrind, isgprof):  # {{{
         #write queuing script
-        if not m.ispc():
+        if not ispc():
             fid = open(modelname + '.queue', 'w')
             fid.write('#!/bin/sh\n')
@@ -147,6 +165,5 @@
                 #Add - -    gen - suppressions = all to get suppression lines
                 #fid.write('LD_PRELOAD={} \\\n'.format(self.valgrindlib))
-                fid.write('mpiexec -np {} {} --leak -check=full --suppressions={} {}/kriging.exe {}/{} {} 2 > {}.errlog > {}.outlog ' .format
-                          (self.np, self.valgrind, self.valgrindsup, self.codepath, self.executionpath, modelname, modelname, modelname, modelname))
+                fid.write('mpiexec -np {} {} --leak-check=full --suppressions={} {}/kriging.exe {}/{} {} 2> {}.errlog > {}.outlog ' .format(self.np, self.valgrind, self.valgrindsup, self.codepath, self.executionpath, modelname, modelname, modelname, modelname))
             if not io_gather:    #concatenate the output files:
                 fid.write('\ncat {}.outbin. *>{}.outbin'.format(modelname, modelname))
@@ -178,5 +195,5 @@
         if self.interactive:
             compressstring += ' {}.errlog {}.outlog '.format(modelname, modelname)
-        subprocess.call(compressstring, shell=True)
+        call(compressstring, shell=True)
 
         print('uploading input file and queueing script')
@@ -188,15 +205,15 @@
         print('launching solution sequence on remote cluster')
         if restart:
-            launchcommand = 'cd {} && cd {} chmod 777 {}.queue && ./{}.queue'.format(self.executionpath, dirname, modelname, modelname)
+            launchcommand = 'cd {} && cd {} chmod 755 {}.queue && ./{}.queue'.format(self.executionpath, dirname, modelname, modelname)
         else:
             if batch:
                 launchcommand = 'cd {} && rm -rf ./{} && mkdir {} && cd {} && mv ../{}.tar.gz ./&& tar -zxf {}.tar.gz'.format(self.executionpath, dirname, dirname, dirname, dirname, dirname)
             else:
-                launchcommand = 'cd {} && rm -rf ./{} && mkdir {} && cd {} && mv ../{}.tar.gz ./&& tar -zxf {}.tar.gz  && chmod 777 {}.queue && ./{}.queue'.format(self.executionpath, dirname, dirname, dirname, dirname, dirname, modelname, modelname)
+                launchcommand = 'cd {} && rm -rf ./{} && mkdir {} && cd {} && mv ../{}.tar.gz ./&& tar -zxf {}.tar.gz  && chmod 755 {}.queue && ./{}.queue'.format(self.executionpath, dirname, dirname, dirname, dirname, dirname, modelname, modelname)
         issmssh(self.name, self.login, self.port, launchcommand)
     # }}}
 
     def Download(self, dirname, filelist):  # {{{
-        if m.ispc():
+        if ispc():
             #do nothing
             return
Index: /issm/trunk/src/m/classes/clusters/generic_static.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/generic_static.m	(revision 24685)
+++ /issm/trunk/src/m/classes/clusters/generic_static.m	(revision 24686)
@@ -5,11 +5,11 @@
 
 classdef generic_static
-	properties (SetAccess=public) 
+	properties (SetAccess=public)
 		% {{{
 		name='';
 		np=1;
 		codepath=fileparts(which('issm.exe'));
-		executionpath = '.';
-		interactive = 1;
+		executionpath='.';
+		interactive=1;
 		shell='/bin/sh';
 		%}}}
@@ -17,5 +17,4 @@
 	methods
 		function cluster=generic_static(varargin) % {{{
-
 			%use provided options to change fields
 			options=pairoptions(varargin{:});
@@ -50,18 +49,10 @@
 		%}}}
 		function BuildQueueScript(cluster,dirname,modelname,solution,io_gather,isvalgrind,isgprof,isdakota,isoceancoupling) % {{{
+			% Which executable are we calling?
+			executable='issm.exe'; % default
 
-			%Check that issm.exe exists in the right path
-			if ~exist([cluster.codepath '/issm.exe'],'file'),
-				error(['File ' cluster.codepath '/issm.exe does not exist']);
-			end
-
-			%Now process codepath and replace empty spaces with \ to avoid errors in queuing script
-			codepath2=strrep(cluster.codepath,' ','\ ');
-
-			%write queuing script
-			%what is the executable being called?
-			executable='issm.exe';
 			if isdakota,
-				version=IssmConfig('_DAKOTA_VERSION_'); version=str2num(version(1:3));
+				version=IssmConfig('_DAKOTA_VERSION_');
+				version=str2num(version(1:3));
 				if (version>=6),
 					executable='issm_dakota.exe';
@@ -72,24 +63,32 @@
 			end
 
-			%write queuing script 
+			% Check that executable exists at the right path
+			if ~exist([cluster.codepath '/' executable],'file'),
+				error(['File ' cluster.codepath '/' executable ' does not exist']);
+			end
+
+			% Process codepath and prepend empty spaces with \ to avoid errors in queuing script
+			codepath=strrep(cluster.codepath,' ','\ ');
+
+			% Write queueing script
 			fid=fopen([modelname '.queue'],'w');
 			fprintf(fid,'#!%s\n',cluster.shell);
-			fprintf(fid,['%s/mpiexec -np %i %s/%s %s %s %s \n'],codepath2,cluster.np,codepath2,executable,solution,'./',modelname);
+			fprintf(fid,['%s/mpiexec -np %i %s/%s %s %s %s \n'],codepath,cluster.np,codepath,executable,solution,'./',modelname);
 			fclose(fid);
 
-			%in interactive mode, create a run file, and errlog and outlog file
-			fid=fopen([modelname '.errlog'],'w'); fclose(fid);
-			fid=fopen([modelname '.outlog'],'w'); fclose(fid);
+			% Create an errlog and outlog file
+			fid=fopen([modelname '.errlog'],'w');
+			fclose(fid);
+			fid=fopen([modelname '.outlog'],'w');
+			fclose(fid);
 		end
 		%}}}
 		function UploadQueueJob(cluster,modelname,dirname,filelist)% {{{
-
-			%do nothing
+			% Do nothing
+			return;
 		end %}}}
 		function LaunchQueueJob(cluster,modelname,dirname,filelist,restart,batch)% {{{
-
 			if ~ispc,
-
-				%figure out what shell extension we will use:
+				% Figure out which file extension to use
 				if isempty(strfind(cluster.shell,'csh')),
 					shellext='sh';
@@ -106,6 +105,6 @@
 		end %}}}
 		function Download(cluster,dirname,filelist)% {{{
-				%do nothing
-				return;
+			% Do nothing
+			return;
 		end %}}}
 	end
Index: /issm/trunk/src/m/classes/clusters/generic_static.py
===================================================================
--- /issm/trunk/src/m/classes/clusters/generic_static.py	(revision 24686)
+++ /issm/trunk/src/m/classes/clusters/generic_static.py	(revision 24686)
@@ -0,0 +1,160 @@
+import numpy as np
+import socket
+import os
+import math
+import subprocess
+from IssmConfig import IssmConfig
+from issmdir import issmdir
+from pairoptions import pairoptions
+from issmssh import issmssh
+from issmscpin import issmscpin
+from issmscpout import issmscpout
+import MatlabFuncs as m
+
+
+class generic_static(object):
+    """
+    GENERIC cluster class definition
+
+       Usage:
+          cluster = generic_static('name', 'astrid', 'np', 3)
+    """
+
+    def __init__(self, *args):  # {{{
+        codepath = subprocess.check_output(["which", "issm.exe"]).decode().rstrip('\r\n')
+        codepath = codepath.replace('/issm.exe', '')
+
+        self.name = ''
+        self.np = 1
+        self.codepath = codepath
+        self.executionpath = '.'
+        self.interactive = 1
+        self.shell = '/bin/sh'
+
+        #use provided options to change fields
+        options = pairoptions(*args)
+
+        #get name
+        self.name = socket.gethostname()
+
+        #initialize cluster using user settings if provided
+        if os.path.exists(self.name + '_settings.py'):
+            exec(compile(open(self.name + '_settings.py').read(), self.name + '_settings.py', 'exec'), globals())
+
+        #OK get other fields
+        self = options.AssignObjectFields(self)
+    # }}}
+
+    def __repr__(self):  # {{{
+        #  display the object
+        s = "class '%s' object '%s' = \n" % (type(self), 'self')
+        s += "    name: %s\n" % self.name
+        s += "    np: %i\n" % self.np
+        s += "    codepath: %s\n" % self.codepath
+        s += "    shell: %s\n" % self.shell
+        return s
+    # }}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
+        if self.np < 1:
+            md = checkmessage(md, 'number of processors should be at least 1')
+        if math.isnan(self.np):
+            md = checkmessage(md, 'number of processors should not be NaN!')
+
+        return md
+    # }}}
+    def BuildQueueScript(self, dirname, modelname, solution, io_gather, isvalgrind, isgprof, isdakota, isoceancoupling):  # {{{
+        # Which executable are we calling?
+        executable = 'issm.exe' # default
+
+        if isdakota:
+            version = IssmConfig('_DAKOTA_VERSION_')
+            version = float(version[0])
+            if version >= 6:
+                executable = 'issm_dakota.exe'
+        if isoceancoupling:
+            executable = 'issm_ocean.exe'
+
+        # Check that executable exists at the right path
+        if not os.path.isfile(self.codepath + '/' + executable):
+            raise RuntimeError('File ' + self.codepath + '/' + executable + ' does not exist')
+
+        # Process codepath and prepend empty spaces with \ to avoid errors in queuing script
+        codepath = self.codepath.replace(' ', '\ ')
+
+        # Write queueing script
+        fid = open(modelname + '.queue', 'w')
+        fid.write('#!{}'.format(self.shell) + '\n')
+        fid.write('{}/mpiexec -np {} {}/{} {} {} {}'.format(codepath, self.np, codepath, executable, solution, './', modelname) + '\n')
+        fid.close()
+
+        # Set permissions on queue script so that it can be run
+        subprocess.call(['chmod', '0755', modelname + '.queue'])
+
+        # Create an errlog and outlog file
+        fid = open(modelname + '.errlog', 'w')
+        fid.close()
+        fid = open(modelname + '.outlog', 'w')
+        fid.close()
+    # }}}
+
+    def BuildKrigingQueueScript(self, modelname, solution, io_gather, isvalgrind, isgprof):  # {{{
+        # Which executable are we calling?
+        executable = 'kriging.exe' # default
+
+        # NOTE(review): 'isdakota' and 'isoceancoupling' are not parameters of
+        # BuildKrigingQueueScript and are undefined here, so the original code
+        # raised NameError at runtime. Executable selection is disabled until
+        # the flags are threaded through; kriging runs use kriging.exe.
+        # if isdakota: select 'issm_dakota.exe' when _DAKOTA_VERSION_ >= 6
+        # if isoceancoupling:
+        #     executable = 'issm_ocean.exe'
+
+        # Check that executable exists at the right path
+        if not os.path.isfile(self.codepath + '/' + executable):
+            raise RuntimeError('File ' + self.codepath + '/' + executable + ' does not exist')
+
+        # Process codepath and prepend empty spaces with \ to avoid errors in queuing script
+        codepath = self.codepath.replace(' ', '\ ')
+
+        # Write queueing script
+        fid = open(modelname + '.queue', 'w')
+        fid.write('#!{}'.format(self.shell) + '\n')
+        fid.write('{}/mpiexec -np {} {}/{} {} {} {}'.format(codepath, self.np, codepath, executable, solution, './', modelname) + '\n')
+        fid.close()
+
+        # Set permissions on queue script so that it can be run
+        subprocess.call(['chmod', '0755', modelname + '.queue'])
+
+        # Create an errlog and outlog file
+        fid = open(modelname + '.errlog', 'w')
+        fid.close()
+        fid = open(modelname + '.outlog', 'w')
+        fid.close()
+    # }}}
+
+    def UploadQueueJob(self, modelname, dirname, filelist):  # {{{
+        # Do nothing
+        return
+    # }}}
+
+    def LaunchQueueJob(self, modelname, dirname, filelist, restart, batch):  # {{{
+        if not m.ispc():
+            # Figure out which file extension to use
+            if self.shell.find('csh') == -1:
+                shellext='sh'
+            else:
+                shellext='csh'
+
+            print('launching solution sequence')
+            launchcommand = './' + modelname + '.queue'
+            subprocess.call([launchcommand])
+        else:
+            launchcommand = './' + modelname + '.bat'
+            subprocess.call([launchcommand])
+    # }}}
+
+    def Download(self, dirname, filelist):  # {{{
+        # Do nothing
+        return
+    # }}}
Index: /issm/trunk/src/m/classes/clusters/pfe.m
===================================================================
--- /issm/trunk/src/m/classes/clusters/pfe.m	(revision 24685)
+++ /issm/trunk/src/m/classes/clusters/pfe.m	(revision 24686)
@@ -165,4 +165,5 @@
 			 for i=1:numel(cluster.modules), fprintf(fid,['module load ' cluster.modules{i} '\n']); end
 			 fprintf(fid,'export PATH="$PATH:."\n\n');
+			 fprintf(fid,'export MPI_LAUNCH_TIMEOUT=520\n');
 			 fprintf(fid,'export MPI_GROUP_MAX=64\n\n');
 			 fprintf(fid,'export ISSM_DIR="%s/../"\n',cluster.codepath); %FIXME
@@ -170,5 +171,5 @@
 			 fprintf(fid,'cd %s/%s/\n\n',cluster.executionpath,dirname);
 			 if ~isvalgrind,
-				 fprintf(fid,'mpiexec -np %i %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
+				 fprintf(fid,'/u/scicon/tools/bin/several_tries mpiexec -np %i /u/scicon/tools/bin/mbind.x -cs -n%i %s/%s %s %s %s\n',cluster.np,cluster.cpuspernode,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
 			 else
 				 fprintf(fid,'mpiexec -np %i valgrind --leak-check=full %s/%s %s %s %s\n',cluster.np,cluster.codepath,executable,solution,[cluster.executionpath '/' dirname],modelname);
Index: /issm/trunk/src/m/classes/clusters/pfe.py
===================================================================
--- /issm/trunk/src/m/classes/clusters/pfe.py	(revision 24685)
+++ /issm/trunk/src/m/classes/clusters/pfe.py	(revision 24686)
@@ -46,4 +46,5 @@
         self = pfe_settings(self)
         self.np = self.nprocs()
+
         #OK get other fields
         self = options.AssignObjectFields(self)
Index: /issm/trunk/src/m/classes/clusters/stallo.py
===================================================================
--- /issm/trunk/src/m/classes/clusters/stallo.py	(revision 24685)
+++ /issm/trunk/src/m/classes/clusters/stallo.py	(revision 24686)
@@ -108,6 +108,7 @@
         m, s = divmod(timeobj.total_seconds(), 60)
         h, m = divmod(m, 60)
-        d, h = divmod(h, 60)
+        d, h = divmod(h, 24)
         timestring = "%02d-%02d:%02d:%02d" % (d, h, m, s)
+        print(timestring)
         fid = open(modelname + '.queue', 'w')
         fid.write('#!/bin/bash -l\n')
Index: /issm/trunk/src/m/classes/dsl.js
===================================================================
--- /issm/trunk/src/m/classes/dsl.js	(revision 24686)
+++ /issm/trunk/src/m/classes/dsl.js	(revision 24686)
@@ -0,0 +1,55 @@
+//dsl Class definition
+//
+//   Usage:
+//      dsl=dsl();
+
+function dsl(){
+	//methods
+	this.setdefaultparameters = function(){// {{{
+		this.requested_outputs=['default'];
+	} // }}}
+	this.disp = function(){ // {{{
+		console.log(sprintf('   dsl parameters:'));
+		fielddisplay(this,'global_average_thermosteric_sea_level_change','corresponds to zostoga field in CMIP5 archives. Specified as a temporally variable global rate (mm/yr)');
+		fielddisplay(this,'sea_surface_height_change_above_geoid','corresponds to zos field in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr)');
+		fielddisplay(this,'sea_water_pressure_change_at_sea_floor','corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable rate (in Pa/yr)');
+
+
+	} // }}}
+	this.defaultoutputs = function(){ // {{{
+		return '';
+	}//}}}
+    this.classname = function(){ // {{{
+        return "dsl";
+    } // }}}
+    this.extrude = function(md) {//{{{
+        this.sea_surface_height_change_above_geoid=project3d(md,'vector',this.sea_surface_height_change_above_geoid,'type','node');
+        this.sea_water_pressure_change_at_sea_floor=project3d(md,'vector',this.sea_water_pressure_change_at_sea_floor,'type','node');
+        return this;
+    }//}}}
+    this.checkconsistency = function(md,solution,analyses) { //{{{
+
+        if(ArrayAnyEqual(ArrayIsMember('SealevelriseAnalysis',analyses),1)){
+            checkfield(md,'fieldname','dsl.sea_surface_height_change_above_geoid','timeseries',1,'NaN',1,'Inf',1);
+            checkfield(md,'fieldname','dsl.sea_water_pressure_change_at_sea_floor','timeseries',1,'NaN',1,'Inf',1);
+        }
+
+    } // }}}
+    this.marshall=function(md,prefix,fid) { //{{{
+
+        var yts=md.constants.yts;
+
+		WriteData(fid,prefix,'name','md.dsl.model','data',1,'format','Integer');
+		WriteData(fid,prefix,'object',this,'class','dsl','fieldname','global_average_thermosteric_sea_level_change','format','DoubleMat','mattype',1,'timeserieslength',1+1,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
+			WriteData(fid,prefix,'object',this,'class','dsl','fieldname','sea_water_pressure_change_at_sea_floor','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
+			WriteData(fid,prefix,'object',this,'class','dsl','fieldname','sea_surface_height_change_above_geoid','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
+    }//}}}
+    this.fix=function() { //{{{
+    }//}}}
+	//properties 
+    // {{{
+	this.global_average_thermosteric_sea_level_change = NaN;
+	this.sea_surface_height_change_above_geoid = NaN;
+	this.sea_water_pressure_change_at_sea_floor = NaN;
+    // }}}
+}
Index: /issm/trunk/src/m/classes/dsl.m
===================================================================
--- /issm/trunk/src/m/classes/dsl.m	(revision 24686)
+++ /issm/trunk/src/m/classes/dsl.m	(revision 24686)
@@ -0,0 +1,82 @@
+%DSL class definition
+%
+%   Usage:
+%      dsl=dsl(); %dynamic sea level class, based on CMIP5 outputs
+
+classdef dsl
+	properties (SetAccess=public) 
+
+		global_average_thermosteric_sea_level_change; %corresponds to zostoga field in CMIP5 archives. Specified as a temporally variable global rate (mm/yr)
+		sea_surface_height_change_above_geoid; %corresponds to zos field in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr)
+		sea_water_pressure_change_at_sea_floor; %corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable rate (in mm/yr equivalent, not in Pa/yr!) for each ensemble
+		compute_fingerprints; %do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid
+
+	end
+	methods
+		function self = extrude(self,md) % {{{
+			self.sea_surface_height_change_above_geoid=project3d(md,'vector',self.sea_surface_height_change_above_geoid,'type','node','layer',1);
+			self.sea_water_pressure_change_at_sea_floor=project3d(md,'vector',self.sea_water_pressure_change_at_sea_floor,'type','node','layer',1);
+		end % }}}
+		function self = dsl(varargin) % {{{
+			switch nargin
+				case 0
+					self=setdefaultparameters(self);
+				case 1
+					self=structtoobj(dsl(),varargin{1});
+				otherwise
+					error('constructor not supported');
+			end
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			self.global_average_thermosteric_sea_level_change=0;
+			self.sea_surface_height_change_above_geoid=NaN;
+			self.sea_water_pressure_change_at_sea_floor=NaN;
+			self.compute_fingerprints=0;
+
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			%Early return
+			if ~ismember('SealevelriseAnalysis',analyses), return; end
+			if (strcmp(solution,'TransientSolution') & md.transient.isslr == 0), return; end
+			md = checkfield(md,'fieldname','dsl.global_average_thermosteric_sea_level_change','NaN',1,'Inf',1);
+			md = checkfield(md,'fieldname','dsl.sea_surface_height_change_above_geoid','NaN',1,'Inf',1,'timeseries',1);
+			md = checkfield(md,'fieldname','dsl.sea_water_pressure_change_at_sea_floor','NaN',1,'Inf',1,'timeseries',1);
+			md = checkfield(md,'fieldname','dsl.compute_fingerprints','NaN',1,'Inf',1,'values',[0,1]);
+			if self.compute_fingerprints,
+				%check geodetic flag of slr is on: 
+				if md.slr.geodetic==0,
+					error('DSL checkconsistency error message: if bottom pressure fingerprints computations are requested, slr class should have geodetic flag on');
+				end
+			end
+
+		end % }}}
+		function disp(self) % {{{
+
+			disp(sprintf('   dsl parameters:'));
+			fielddisplay(self,'global_average_thermosteric_sea_level_change','corresponds to zostoga field in CMIP5 archives. Specified as a temporally variable global rate (mm/yr)');
+			fielddisplay(self,'sea_surface_height_change_above_geoid','corresponds to zos field in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr)');
+			fielddisplay(self,'sea_water_pressure_change_at_sea_floor','corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable rate (in Pa/yr)');
+			fielddisplay(self,'compute_fingerprints','do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid');
+
+		end % }}}
+		function marshall(self,prefix,md,fid) % {{{
+
+			WriteData(fid,prefix,'name','md.dsl.model','data',1,'format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','compute_fingerprints','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','global_average_thermosteric_sea_level_change','format','DoubleMat','mattype',1,'timeseries',1,'timeserieslength',2,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
+			WriteData(fid,prefix,'object',self,'fieldname','sea_water_pressure_change_at_sea_floor','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
+			WriteData(fid,prefix,'object',self,'fieldname','sea_surface_height_change_above_geoid','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
+
+		end % }}}
+		function savemodeljs(self,fid,modelname) % {{{
+		
+			writejs1Darray(fid,[modelname '.dsl.global_average_thermosteric_sea_level_change'],self.global_average_thermosteric_sea_level_change);
+			writejs1Darray(fid,[modelname '.dsl.compute_fingerprints'],self.compute_fingerprints);
+			writejs1Darray(fid,[modelname '.dsl.sea_surface_height_change_above_geoid'],self.sea_surface_height_change_above_geoid);
+			writejs1Darray(fid,[modelname '.dsl.sea_water_pressure_change_at_sea_floor'],self.sea_water_pressure_change_at_sea_floor);
+
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/dsl.py
===================================================================
--- /issm/trunk/src/m/classes/dsl.py	(revision 24686)
+++ /issm/trunk/src/m/classes/dsl.py	(revision 24686)
@@ -0,0 +1,69 @@
+import numpy as np
+from fielddisplay import fielddisplay
+from checkfield import checkfield
+from WriteData import WriteData
+from project3d import project3d
+
+
+class dsl(object):
+    """
+    dsl Class definition
+
+       Usage:
+          dsl = dsl()
+    """
+
+    def __init__(self):  # {{{
+        self.global_average_thermosteric_sea_level_change = 0 #corresponds to zostoga field in CMIP5 archives. Specified as a temporally variable global rate (mm/yr)
+        self.sea_surface_height_change_above_geoid = float('NaN') #corresponds to zos field in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr)
+        self.sea_water_pressure_change_at_sea_floor = float('NaN') #corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable rate (in Pa/yr)
+        self.compute_fingerprints = 0; #do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid
+    #}}}
+
+    def __repr__(self):  # {{{
+        string = "   dsl parameters:"
+        string = "%s\n%s" % (string, fielddisplay(self, 'global_average_thermosteric_sea_level_change', 'corresponds to zostoga field in CMIP5 archives. Specified as a temporally variable global rate (mm/yr)'))
+        string = "%s\n%s" % (string, fielddisplay(self, 'sea_surface_height_change_above_geoid', 'corresponds to zos field in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr)'))
+        string = "%s\n%s" % (string, fielddisplay(self, 'sea_water_pressure_change_at_sea_floor', 'corresponds to bpo field in CMIP5 archives. Specified as a spatio-temporally variable rate (in Pa/yr)'))
+        string = "%s\n%s" % (string, fielddisplay(self, 'compute_fingerprints', 'do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid'))
+        return string
+    #}}}
+
+    def extrude(self, md):  # {{{
+        self.sea_surface_height_change_above_geoid = project3d(md, 'vector', self.sea_surface_height_change_above_geoid, 'type', 'node')
+        self.sea_water_pressure_change_at_sea_floor = project3d(md, 'vector', self.sea_water_pressure_change_at_sea_floor, 'type', 'node')
+        return self
+    #}}}
+
+    def defaultoutputs(self, md):  # {{{
+        return []
+    #}}}
+
+    def checkconsistency(self, md, solution, analyses):  # {{{
+        # Early return
+        if not 'SealevelriseAnalysis' in analyses:
+            return md
+
+        if solution == 'TransientSolution' and md.transient.isslr == 0:
+            return md
+
+        md = checkfield(md, 'fieldname', 'dsl.global_average_thermosteric_sea_level_change', 'NaN', 1, 'Inf', 1)
+        md = checkfield(md, 'fieldname', 'dsl.sea_surface_height_change_above_geoid', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
+        md = checkfield(md, 'fieldname', 'dsl.sea_water_pressure_change_at_sea_floor', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
+        md = checkfield(md, 'fieldname', 'dsl.compute_fingerprints', 'NaN', 1, 'Inf', 1, 'values', [0, 1])
+
+        if self.compute_fingerprints:
+            #check geodetic flag of slr is on:
+            if md.slr.geodetic == 0:
+                raise RuntimeError('DSL checkconsistency error message: if bottom pressure fingerprints computations are requested, slr class should have geodetic flag on')
+        return md
+    # }}}
+
+    def marshall(self, prefix, md, fid):    # {{{
+        yts = md.constants.yts
+        WriteData(fid, prefix, 'name', 'md.dsl.model', 'data', 1, 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'dsl', 'fieldname', 'compute_fingerprints', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'dsl', 'fieldname', 'global_average_thermosteric_sea_level_change', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', 1+1, 'yts', md.constants.yts, 'scale', 1e-3/md.constants.yts)
+        WriteData(fid, prefix, 'object', self, 'class', 'dsl', 'fieldname', 'sea_water_pressure_change_at_sea_floor', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices+1, 'yts', md.constants.yts, 'scale', 1e-3/md.constants.yts)
+        WriteData(fid, prefix, 'object', self, 'class', 'dsl', 'fieldname', 'sea_surface_height_change_above_geoid', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices+1, 'yts', md.constants.yts)
+    # }}}
Index: /issm/trunk/src/m/classes/dslmme.m
===================================================================
--- /issm/trunk/src/m/classes/dslmme.m	(revision 24686)
+++ /issm/trunk/src/m/classes/dslmme.m	(revision 24686)
@@ -0,0 +1,92 @@
+%DSLMME class definition
+%
+%   Usage:
+%      dsl=dslmme(); %dynamic sea level class based on a multi-model ensemble of CMIP5 outputs
+
+classdef dslmme
+	properties (SetAccess=public) 
+
+		modelid; %index into the multi-model ensemble, determine which field will be used.
+		global_average_thermosteric_sea_level_change; %corresponds to zostoga fields in CMIP5 archives. Specified as a temporally variable global rate (mm/yr) for each ensemble.
+		sea_surface_height_change_above_geoid; %corresponds to zos fields in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr) for each ensemble
+		sea_water_pressure_change_at_sea_floor; %corresponds to bpo fields in CMIP5 archives. Specified as a spatio-temporally variable rate (in mm/yr equivalent, not in Pa/yr!) for each ensemble
+		compute_fingerprints; %do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid
+
+	end
+	methods
+		function self = extrude(self,md) % {{{
+			for i=1:length(self.global_average_thermosteric_sea_level_change),
+				self.sea_surface_height_change_above_geoid{i}=project3d(md,'vector',self.sea_surface_height_change_above_geoid{i},'type','node','layer',1);
+				self.sea_water_pressure_change_at_sea_floor{i}=project3d(md,'vector',self.sea_water_pressure_change_at_sea_floor{i},'type','node','layer',1);
+			end
+		end % }}}
+		function self = dslmme(varargin) % {{{
+			switch nargin
+				case 0
+					self=setdefaultparameters(self);
+				case 1
+					self=structtoobj(dslmme(),varargin{1});
+				otherwise
+					error('constructor not supported');
+			end
+		end % }}}
+		function self = setdefaultparameters(self) % {{{
+
+			self.modelid=0;
+			self.global_average_thermosteric_sea_level_change={};
+			self.sea_surface_height_change_above_geoid={};
+			self.sea_water_pressure_change_at_sea_floor={};
+			self.compute_fingerprints=0;
+
+		end % }}}
+		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			%Early return
+			if ~ismember('SealevelriseAnalysis',analyses), return; end
+			if (strcmp(solution,'TransientSolution') & md.transient.isslr == 0), return; end
+			for i=1:length(self.global_average_thermosteric_sea_level_change),
+				md = checkfield(md,'field',self.global_average_thermosteric_sea_level_change{i},'NaN',1,'Inf',1);
+				md = checkfield(md,'field',self.sea_surface_height_change_above_geoid{i},'NaN',1,'Inf',1,'timeseries',1);
+				md = checkfield(md,'field',self.sea_water_pressure_change_at_sea_floor{i},'NaN',1,'Inf',1,'timeseries',1);
+			end
+			md = checkfield(md,'field',self.modelid,'NaN',1,'Inf',1,'>=',1,'<=',length(self.global_average_thermosteric_sea_level_change));
+			if self.compute_fingerprints,
+				%check geodetic flag of slr is on: 
+				if md.slr.geodetic==0,
+					error('DSL checkconsistency error message: if bottom pressure fingerprints computations are requested, slr class should have geodetic flag on');
+				end
+			end
+
+
+		end % }}}
+		function disp(self) % {{{
+
+			disp(sprintf('   dsl mme parameters:'));
+			fielddisplay(self,'modelid','index into the multi-model ensemble, determine which field will be used.');
+			fielddisplay(self,'global_average_thermosteric_sea_level_change','corresponds to zostoga fields in CMIP5 archives. Specified as a temporally variable global rate (mm/yr) for each ensemble.');
+			fielddisplay(self,'sea_surface_height_change_above_geoid','corresponds to zos fields in CMIP5 archives. Spatial average is 0. Specified as a spatio-temporally variable rate (mm/yr) for each ensemble.');
+			fielddisplay(self,'sea_water_pressure_change_at_sea_floor','corresponds to bpo fields in CMIP5 archives. Specified as a spatio-temporally variable rate (in Pa/yr) for each ensemble.');
+			fielddisplay(self,'compute_fingerprints','do we use the sea water pressure change to compute fingerprints and correct sea_surface_height_change_above_geoid');
+		end % }}}
+		function marshall(self,prefix,md,fid) % {{{
+
+			WriteData(fid,prefix,'name','md.dsl.model','data',2,'format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','compute_fingerprints','format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','modelid','format','Integer');
+			WriteData(fid,prefix,'name','md.dsl.nummodels','data',length(self.global_average_thermosteric_sea_level_change),'format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','global_average_thermosteric_sea_level_change','format','MatArray','timeseries',1,'timeserieslength',2,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
+			WriteData(fid,prefix,'object',self,'fieldname','sea_water_pressure_change_at_sea_floor','format','MatArray','timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
+			WriteData(fid,prefix,'object',self,'fieldname','sea_surface_height_change_above_geoid','format','MatArray','timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
+
+		end % }}}
+		function savemodeljs(self,fid,modelname) % {{{
+			
+			writejsdouble(fid,[modelname '.dsl.modelid'],self.modelid);
+			writejscellarray(fid,[modelname '.dsl.global_average_thermosteric_sea_level_change'],self.global_average_thermosteric_sea_level_change);
+			writejscellarray(fid,[modelname '.dsl.sea_surface_height_change_above_geoid'],self.sea_surface_height_change_above_geoid);
+			writejs1Darray(fid,[modelname '.dsl.compute_fingerprints'],self.compute_fingerprints);
+			writejscellarray(fid,[modelname '.dsl.sea_water_pressure_change_at_sea_floor'],self.sea_water_pressure_change_at_sea_floor);
+
+		end % }}}
+	end
+end
Index: /issm/trunk/src/m/classes/friction.m
===================================================================
--- /issm/trunk/src/m/classes/friction.m	(revision 24685)
+++ /issm/trunk/src/m/classes/friction.m	(revision 24686)
@@ -11,4 +11,5 @@
 		coupling    = 0;
 		effective_pressure = NaN;
+		effective_pressure_limit = 0;
 	end
 	methods
@@ -42,4 +43,5 @@
 
 			self.coupling = 0;
+			self.effective_pressure_limit = 0;
 
 		end % }}}
@@ -54,4 +56,5 @@
 			md = checkfield(md,'fieldname','friction.p','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
 			md = checkfield(md,'fieldname','friction.coupling','numel',[1],'values',[0:4]);
+			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);
 			switch self.coupling
 				case 0
@@ -73,4 +76,5 @@
 			fielddisplay(self,'effective_pressure','Effective Pressure for the forcing if not coupled [Pa]');
 			fielddisplay(self,'coupling','Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: use coupled model (not implemented yet)');
+			fielddisplay(self,'effective_pressure_limit','Neff do not allow to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -82,4 +86,5 @@
 			WriteData(fid,prefix,'object',self,'fieldname','q','format','DoubleMat','mattype',2);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','coupling','format','Integer');
+			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
 			switch self.coupling
 				case 0
@@ -101,5 +106,5 @@
 			writejs1Darray(fid,[modelname '.friction.coupling'],self.coupling);
 			writejs1Darray(fid,[modelname '.friction.effective_pressure'],self.effective_pressure);
-
+			writejs1Darray(fid,[modelname '.friction.effective_pressure_limit'],self.effective_pressure_limit);
 		end % }}}
 	end
Index: /issm/trunk/src/m/classes/friction.py
===================================================================
--- /issm/trunk/src/m/classes/friction.py	(revision 24685)
+++ /issm/trunk/src/m/classes/friction.py	(revision 24686)
@@ -19,5 +19,6 @@
         self.coupling = 0
         self.effective_pressure = float('NaN')
-    #set defaults
+        self.effective_pressure_limit = 0
+        #set defaults
         self.setdefaultparameters()
         self.requested_outputs = []
@@ -32,4 +33,5 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'coupling', 'Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: used coupled model (not implemented yet)'))
         string = "%s\n%s" % (string, fielddisplay(self, 'effective_pressure', 'Effective Pressure for the forcing if not coupled [Pa]'))
+        string = "%s\n%s" % (string, fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)'))
         string = "%s\n%s" % (string, fielddisplay(self, 'requested_outputs', 'additional outputs requested'))
         return string
@@ -50,4 +52,5 @@
     def setdefaultparameters(self):  # {{{
         self.requested_outputs = ['default']
+        self.effective_pressure_limit = 0
         return self
     #}}}
@@ -68,4 +71,5 @@
         md = checkfield(md, 'fieldname', 'friction.p', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements])
         md = checkfield(md, 'fieldname', 'friction.coupling', 'numel', [1], 'values', [0, 1, 2, 3, 4])
+        md = checkfield(md, 'fieldname', 'friction.effective_pressure_limit', 'numel', [1], '>=', 0)
         if self.coupling == 3:
             md = checkfield(md, 'fieldname', 'friction.effective_pressure', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
@@ -82,4 +86,5 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'q', 'format', 'DoubleMat', 'mattype', 2)
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'coupling', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'friction', 'fieldname', 'effective_pressure_limit', 'format', 'Double')
         if self.coupling == 3:
             WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'effective_pressure', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
Index: /issm/trunk/src/m/classes/frictioncoulomb.m
===================================================================
--- /issm/trunk/src/m/classes/frictioncoulomb.m	(revision 24685)
+++ /issm/trunk/src/m/classes/frictioncoulomb.m	(revision 24686)
@@ -12,4 +12,5 @@
 		coupling	= 0;
 		effective_pressure = NaN;
+		effective_pressure_limit = 0;
 	end
 	methods
@@ -39,4 +40,6 @@
 		function self = setdefaultparameters(self) % {{{
 
+			self.effective_pressure_limit = 0;
+
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
@@ -49,4 +52,5 @@
 			md = checkfield(md,'fieldname','friction.p','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
 			md = checkfield(md,'fieldname','friction.coupling','numel',[1],'values',[0 1 2]);
+			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);			
 			switch self.coupling
 				case 0
@@ -67,5 +71,6 @@
 			fielddisplay(self,'effective_pressure','Effective Pressure for the forcing if not coupled [Pa]');
 			fielddisplay(self,'coupling','Coupling flag: 0 for default, 1 for forcing(provide md.friction.effective_pressure)  and 2 for coupled(not implemented yet)');
-		end % }}}
+			fielddisplay(self,'effective_pressure_limit','Neff do not allow to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');	
+		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
 
@@ -76,4 +81,5 @@
 			WriteData(fid,prefix,'object',self,'fieldname','q','format','DoubleMat','mattype',2);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','coupling','format','Integer');
+			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
 			switch self.coupling
 				case 0
Index: /issm/trunk/src/m/classes/frictioncoulomb.py
===================================================================
--- /issm/trunk/src/m/classes/frictioncoulomb.py	(revision 24685)
+++ /issm/trunk/src/m/classes/frictioncoulomb.py	(revision 24686)
@@ -20,4 +20,5 @@
         self.coupling = 0
         self.effective_pressure = float('NaN')
+        self.effective_pressure_limit = 0
     #set defaults
         self.setdefaultparameters()
@@ -32,4 +33,5 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'coupling', 'Coupling flag: 0 for default, 1 for forcing(provide md.friction.effective_pressure)  and 2 for coupled(not implemented yet)'))
         string = "%s\n%s" % (string, fielddisplay(self, 'effective_pressure', 'Effective Pressure for the forcing if not coupled [Pa]'))
+        string = "%s\n%s" % (string, fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)'))
         return string
     #}}}
@@ -50,4 +52,5 @@
 
     def setdefaultparameters(self):  # {{{
+        self.effective_pressure_limit = 0
         return self
     #}}}
@@ -62,4 +65,5 @@
         md = checkfield(md, 'fieldname', 'friction.q', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements])
         md = checkfield(md, 'fieldname', 'friction.p', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements])
+        md = checkfield(md, 'fieldname', 'friction.effective_pressure_limit', 'numel', [1], '>=', 0)
         if self.coupling == 1:
             md = checkfield(md, 'fieldname', 'friction.effective_pressure', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
@@ -78,4 +82,5 @@
         WriteData(fid, prefix, 'object', self, 'fieldname', 'q', 'format', 'DoubleMat', 'mattype', 2)
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'coupling', 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'class', 'friction', 'fieldname', 'effective_pressure_limit', 'format', 'Double')
         if self.coupling == 1:
             WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'effective_pressure', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
Index: /issm/trunk/src/m/classes/frictionhydro.m
===================================================================
--- /issm/trunk/src/m/classes/frictionhydro.m	(revision 24685)
+++ /issm/trunk/src/m/classes/frictionhydro.m	(revision 24686)
@@ -10,5 +10,6 @@
 		C                  = NaN;
 		As                 = NaN;
-		effective_pressure = NaN; 
+		effective_pressure = NaN;
+		effective_pressure_limit = 0;
 	end
 	methods
@@ -23,4 +24,6 @@
 		function self = setdefaultparameters(self) % {{{
 
+			self.effective_pressure_limit = 0;
+
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
@@ -32,4 +35,5 @@
 			md = checkfield(md,'fieldname','friction.C','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
 			md = checkfield(md,'fieldname','friction.As','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
+			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);
 			switch self.coupling
 				case 0
@@ -64,4 +68,5 @@
 			fielddisplay(self,'As','Sliding Parameter without cavitation [m Pa^-n s^-1]');
 			fielddisplay(self,'effective_pressure','Effective Pressure for the forcing if not coupled [Pa]');
+			fielddisplay(self,'effective_pressure_limit','Neff do not allow to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -70,5 +75,6 @@
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','q','format','DoubleMat','mattype',2);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','C','format','DoubleMat','mattype',2);
-			WriteData(fid,prefix,'class','friction','object',self,'fieldname','As','format','DoubleMat','mattype',2);
+			WriteData(fid,prefix,'class','friction','object',self,'fieldname','As','format','DoubleMat','mattype',3);
+			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
 			switch self.coupling
 				case 0
Index: /issm/trunk/src/m/classes/frictionhydro.py
===================================================================
--- /issm/trunk/src/m/classes/frictionhydro.py	(revision 24685)
+++ /issm/trunk/src/m/classes/frictionhydro.py	(revision 24686)
@@ -19,6 +19,6 @@
         self.As = np.nan
         self.effective_pressure = np.nan
-    #set defaults
-
+        self.effective_pressure_limit = 0
+        #set defaults
         self.setdefaultparameters()
     # }}}
@@ -31,4 +31,5 @@
         string = "%s\n%s" % (string, fielddisplay(self, 'As', 'Sliding Parameter without cavitation [m Pa^ - n s^ - 1]'))
         string = "%s\n%s" % (string, fielddisplay(self, 'effective_pressure', 'Effective Pressure for the forcing if not coupled [Pa]'))
+        string = "%s\n%s" % (string, fielddisplay(self, 'effective_pressure_limit', 'Neff do not allow to fall below a certain limit: effective_pressure_limit * rho_ice * g * thickness (default 0)'))
 
         return string
@@ -49,5 +50,5 @@
         self.coupling = 0
         self.effective_pressure = np.nan
-
+        self.effective_pressure_limit = 0
         return self
     # }}}
@@ -62,4 +63,5 @@
         md = checkfield(md, 'fieldname', 'friction.C', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements])
         md = checkfield(md, 'fieldname', 'friction.As', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofelements])
+        md = checkfield(md, 'fieldname', 'friction.effective_pressure_limit', 'numel', [1], '>=', 0)
         if self.coupling == 3:
             md = checkfield(md, 'fieldname', 'friction.effective_pressure', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
@@ -74,4 +76,5 @@
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'C', 'format', 'DoubleMat', 'mattype', 2)
         WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'As', 'format', 'DoubleMat', 'mattype', 2)
+        WriteData(fid, prefix, 'object', self, 'class', 'friction', 'fieldname', 'effective_pressure_limit', 'format', 'Double')
         if self.coupling in[3, 4]:
             WriteData(fid, prefix, 'class', 'friction', 'object', self, 'fieldname', 'effective_pressure', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', md.constants.yts)
Index: /issm/trunk/src/m/classes/frictionjosh.m
===================================================================
--- /issm/trunk/src/m/classes/frictionjosh.m	(revision 24685)
+++ /issm/trunk/src/m/classes/frictionjosh.m	(revision 24686)
@@ -9,4 +9,5 @@
 		pressure_adjusted_temperature = NaN;
 		gamma      = 0.;
+		effective_pressure_limit = 0;
 	end
 	methods
@@ -30,4 +31,7 @@
 			self.gamma = 1.;
 
+			%Default 0
+			self.effective_pressure_limit = 0;
+
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
@@ -39,7 +43,8 @@
 			md = checkfield(md,'fieldname','friction.pressure_adjusted_temperature','NaN',1,'Inf',1);
 			md = checkfield(md,'fieldname','friction.gamma','numel',1,'NaN',1,'Inf',1,'>',0.);
+			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);
 
 			%Check that temperature is provided
-			md = checkfield(md,'fieldname','initialization.temperature','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
+			md = checkfield(md,'fieldname','initialization.temperature','NaN',1,'Inf',1,'size','universal');
 		end % }}}
 		function disp(self) % {{{
@@ -48,4 +53,5 @@
 			fielddisplay(self,'pressure_adjusted_temperature','friction pressure_adjusted_temperature (T - Tpmp) [K]');
 			fielddisplay(self,'gamma','(T - Tpmp)/gamma [K]');
+			fielddisplay(self,'effective_pressure_limit','Neff do not allow to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -55,4 +61,5 @@
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','pressure_adjusted_temperature','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','gamma','format','Double');
+			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
 		end % }}}
 	end
Index: /issm/trunk/src/m/classes/frictionschoof.m
===================================================================
--- /issm/trunk/src/m/classes/frictionschoof.m	(revision 24685)
+++ /issm/trunk/src/m/classes/frictionschoof.m	(revision 24686)
@@ -9,4 +9,5 @@
 		Cmax = NaN;
 		m    = NaN;
+		effective_pressure_limit = 0;
 	end
 	methods
@@ -22,8 +23,11 @@
 		end % }}}
 		function self = extrude(self,md) % {{{
-			md.friction.C    = project3d(md,'vector',md.friction.C,'type','node','layer',1);
-			md.friction.Cmax = project3d(md,'vector',md.friction.Cmax,'type','node','layer',1);
+			self.C    = project3d(md,'vector',self.C,'type','node');
+			self.Cmax = project3d(md,'vector',self.Cmax,'type','node');
+			self.m    = project3d(md,'vector',self.m,'type','element');
 		end % }}}
 		function self = setdefaultparameters(self) % {{{
+
+			self.effective_pressure_limit = 0;
 
 		end % }}}
@@ -35,4 +39,5 @@
 			md = checkfield(md,'fieldname','friction.Cmax','timeseries',1,'NaN',1,'Inf',1,'>',0.);
 			md = checkfield(md,'fieldname','friction.m','NaN',1,'Inf',1,'>',0.,'size',[md.mesh.numberofelements,1]);
+			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);
 		end % }}}
 		function disp(self) % {{{
@@ -47,4 +52,5 @@
 			fielddisplay(self,'Cmax','Iken''s bound (typically between 0.17 and 0.84) [SI]');
 			fielddisplay(self,'m','m exponent (generally taken as m = 1/n = 1/3)');
+			fielddisplay(self,'effective_pressure_limit','Neff is not allowed to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -55,4 +61,5 @@
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','Cmax','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','m','format','DoubleMat','mattype',2);
+			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
 		end % }}}
 	end
Index: /issm/trunk/src/m/classes/frictiontemp.m
===================================================================
--- /issm/trunk/src/m/classes/frictiontemp.m	(revision 24685)
+++ /issm/trunk/src/m/classes/frictiontemp.m	(revision 24686)
@@ -11,5 +11,6 @@
 		q           = NaN;
 		coupling    = 0;
-                effective_pressure = NaN;
+		effective_pressure = NaN;
+		effective_pressure_limit = 0;
 	end
 	methods
@@ -45,4 +46,5 @@
 			self.gamma = 1;
 			self.coupling = 0;
+			self.effective_pressure_limit = 0;
 
 		end % }}}
@@ -55,8 +57,9 @@
 			md = checkfield(md,'fieldname','friction.q','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
 			md = checkfield(md,'fieldname','friction.p','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
-			md = checkfield(md,'fieldname','friction.gamma','NaN',1,'Inf',1,'numel',1,'>',0.);
+			md = checkfield(md,'fieldname','friction.gamma','NaN',2,'Inf',1,'numel',1,'>',0.);
+			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);
 
 			%Check that temperature is provided
-			md = checkfield(md,'fieldname','initialization.temperature','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
+			md = checkfield(md,'fieldname','initialization.temperature','NaN',1,'Inf',1,'size','universal');
 		end % }}}
 		function disp(self) % {{{
@@ -68,4 +71,5 @@
 			fielddisplay(self,'effective_pressure','Effective Pressure for the forcing if not coupled [Pa]');
 			fielddisplay(self,'coupling','Coupling flag 0: uniform sheet (negative pressure ok, default), 1: ice pressure only, 2: water pressure assuming uniform sheet (no negative pressure), 3: use provided effective_pressure, 4: used coupled model (not implemented yet)');
+			fielddisplay(self,'effective_pressure_limit','Neff is not allowed to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -77,4 +81,5 @@
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','q','format','DoubleMat','mattype',2);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','coupling','format','Integer');
+			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
 			switch self.coupling
 				case 0
Index: /issm/trunk/src/m/classes/frictiontsai.m
===================================================================
--- /issm/trunk/src/m/classes/frictiontsai.m	(revision 24685)
+++ /issm/trunk/src/m/classes/frictiontsai.m	(revision 24686)
@@ -9,4 +9,5 @@
 		f = NaN;
 		m = NaN;
+		effective_pressure_limit = 0;
 	end
 	methods
@@ -27,4 +28,6 @@
 		function self = setdefaultparameters(self) % {{{
 
+			self.effective_pressure_limit = 0;
+
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
@@ -35,4 +38,5 @@
 			md = checkfield(md,'fieldname','friction.f','timeseries',1,'NaN',1,'Inf',1,'>',0.);
 			md = checkfield(md,'fieldname','friction.m','NaN',1,'Inf',1,'>',0.,'size',[md.mesh.numberofelements,1]);
+			md = checkfield(md,'fieldname','friction.effective_pressure_limit','numel',[1],'>=',0);
 		end % }}}
 		function disp(self) % {{{
@@ -47,4 +51,5 @@
 			fielddisplay(self,'f','Iken''s bound (typically between 0.17 and 0.84) [SI]');
 			fielddisplay(self,'m','m exponent (generally taken as m = 1/n = 1/3)');
+			fielddisplay(self,'effective_pressure_limit','Neff is not allowed to fall below a certain limit: effective_pressure_limit*rho_ice*g*thickness (default 0)');
 		end % }}}
 		function marshall(self,prefix,md,fid) % {{{
@@ -55,4 +60,5 @@
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','f','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
 			WriteData(fid,prefix,'class','friction','object',self,'fieldname','m','format','DoubleMat','mattype',2);
+			WriteData(fid,prefix,'object',self,'class','friction','fieldname','effective_pressure_limit','format','Double');
 		end % }}}
 	end
Index: /issm/trunk/src/m/classes/frictionweertman.py
===================================================================
--- /issm/trunk/src/m/classes/frictionweertman.py	(revision 24685)
+++ /issm/trunk/src/m/classes/frictionweertman.py	(revision 24686)
@@ -22,5 +22,5 @@
 
     def __repr__(self):  # {{{
-        string = "Weertman sliding law parameters: Sigma_b = C^(-1 / m) * |u_b|^(1 / m - 1) * u_b"
+        string = "Weertman sliding law parameters: Sigma_b = C^(- 1 / m) * |u_b|^(1 / m - 1) * u_b"
 
         string = "%s\n%s" % (string, fielddisplay(self, "C", "friction coefficient [SI]"))
Index: /issm/trunk/src/m/classes/initialization.m
===================================================================
--- /issm/trunk/src/m/classes/initialization.m	(revision 24685)
+++ /issm/trunk/src/m/classes/initialization.m	(revision 24686)
@@ -75,9 +75,9 @@
 				end
 				md = checkfield(md,'fieldname','initialization.pressure','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
-				md = checkfield(md,'fieldname','initialization.temperature','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
+				md = checkfield(md,'fieldname','initialization.temperature','NaN',1,'Inf',1,'size','universal');
 			end
 			if (ismember('EnthalpyAnalysis',analyses) & md.thermal.isenthalpy)
-				md = checkfield(md,'fieldname','initialization.waterfraction','>=',0,'size',[md.mesh.numberofvertices 1]);
-				md = checkfield(md,'fieldname','initialization.watercolumn'  ,'>=',0,'size',[md.mesh.numberofvertices 1]);
+				md = checkfield(md,'fieldname','initialization.waterfraction','>=',0,'size','universal');
+				md = checkfield(md,'fieldname','initialization.watercolumn'  ,'>=',0,'size','universal');
 				pos=find(md.initialization.waterfraction>0.);
 				if(~isempty(pos)),
Index: /issm/trunk/src/m/classes/linearbasalforcings.m
===================================================================
--- /issm/trunk/src/m/classes/linearbasalforcings.m	(revision 24685)
+++ /issm/trunk/src/m/classes/linearbasalforcings.m	(revision 24686)
@@ -5,5 +5,5 @@
 
 classdef linearbasalforcings
-	properties (SetAccess=public) 
+	properties (SetAccess=public)
 		deepwater_melting_rate    = 0.;
 		upperwater_melting_rate   = 0.;
@@ -11,4 +11,5 @@
 		upperwater_elevation      = 0.;
 		groundedice_melting_rate  = NaN;
+		perturbation_melting_rate = NaN;
 		geothermalflux            = NaN;
 	end
@@ -25,5 +26,6 @@
 		end % }}}
 		function self = extrude(self,md) % {{{
-			self.groundedice_melting_rate=project3d(md,'vector',self.groundedice_melting_rate,'type','node','layer',1); 
+			self.perturbation_melting_rate=project3d(md,'vector',self.perturbation_melting_rate,'type','node','layer',1);
+			self.groundedice_melting_rate=project3d(md,'vector',self.groundedice_melting_rate,'type','node','layer',1);
 			self.geothermalflux=project3d(md,'vector',self.geothermalflux,'type','node','layer',1); %bedrock only gets geothermal flux
 		end % }}}
@@ -34,4 +36,5 @@
 				disp('      no basalforcings.groundedice_melting_rate specified: values set as zero');
 			end
+			% TODO: Should we be setting self.geothermalflux here (we do so in linearbasalforcings.py)?
 
 		end % }}}
@@ -46,4 +49,8 @@
 		end % }}}
 		function md = checkconsistency(self,md,solution,analyses) % {{{
+
+			if numel(md.basalforcings.perturbation_melting_rate)>1
+				md = checkfield(md,'fieldname','basalforcings.perturbation_melting_rate','NaN',1,'Inf',1,'timeseries',1);
+			end
 
 			if ismember('MasstransportAnalysis',analyses) & ~(strcmp(solution,'TransientSolution') & md.transient.ismasstransport==0),
@@ -78,4 +85,5 @@
 			fielddisplay(self,'upperwater_elevation','elevation of ocean upperwater [m]');
 			fielddisplay(self,'groundedice_melting_rate','basal melting rate (positive if melting) [m/yr]');
+			fielddisplay(self,'perturbation_melting_rate','basal melting rate perturbation added to computed melting rate (positive if melting) [m/yr]');
 			fielddisplay(self,'geothermalflux','geothermal heat flux [W/m^2]');
 
@@ -86,4 +94,5 @@
 
 			WriteData(fid,prefix,'name','md.basalforcings.model','data',2,'format','Integer');
+			WriteData(fid,prefix,'object',self,'fieldname','perturbation_melting_rate','format','DoubleMat','name','md.basalforcings.perturbation_melting_rate','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts)
 			WriteData(fid,prefix,'object',self,'fieldname','groundedice_melting_rate','format','DoubleMat','name','md.basalforcings.groundedice_melting_rate','mattype',1,'scale',1./yts,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts)
 			WriteData(fid,prefix,'object',self,'fieldname','geothermalflux','name','md.basalforcings.geothermalflux','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts);
Index: /issm/trunk/src/m/classes/linearbasalforcings.py
===================================================================
--- /issm/trunk/src/m/classes/linearbasalforcings.py	(revision 24685)
+++ /issm/trunk/src/m/classes/linearbasalforcings.py	(revision 24686)
@@ -17,9 +17,10 @@
         if not len(args):
             print('empty init')
-            self.groundedice_melting_rate = float('NaN')
             self.deepwater_melting_rate = 0.
             self.deepwater_elevation = 0.
             self.upperwater_melting_rate = 0.
             self.upperwater_elevation = 0.
+            self.groundedice_melting_rate = float('NaN')
+            self.perturbation_melting_rate = float('NaN')
             self.geothermalflux = float('NaN')
 
@@ -30,4 +31,5 @@
             inv = args[0]
             self.groundedice_melting_rate = inv.groundedice_melting_rate
+            self.perturbation_melting_rate = float('NaN')
             self.geothermalflux = inv.geothermalflux
             self.deepwater_melting_rate = 0.
@@ -41,5 +43,4 @@
             raise Exception('constructor not supported')
     #}}}
-
     def __repr__(self):  # {{{
         string = "   linear basal forcings parameters:"
@@ -49,8 +50,8 @@
         string = "%s\n%s" % (string, fielddisplay(self, "upperwater_elevation", "elevation of ocean upper water [m]"))
         string = "%s\n%s" % (string, fielddisplay(self, "groundedice_melting_rate", "basal melting rate (positive if melting) [m/yr]"))
+        string = "%s\n%s" % (string, fielddisplay(self, "perturbation_melting_rate", "perturbation applied to computed melting rate (positive if melting) [m/yr]"))
         string = "%s\n%s" % (string, fielddisplay(self, "geothermalflux", "geothermal heat flux [W/m^2]"))
         return string
     #}}}
-
     def initialize(self, md):  # {{{
         if np.all(np.isnan(self.groundedice_melting_rate)):
@@ -59,5 +60,4 @@
         return self
     #}}}
-
     def setdefaultparameters(self):  # {{{
         self.deepwater_melting_rate = 50.0
@@ -68,6 +68,9 @@
         return self
     #}}}
+    def checkconsistency(self, md, solution, analyses):  # {{{
 
-    def checkconsistency(self, md, solution, analyses):  # {{{
+        if not np.all(np.isnan(self.perturbation_melting_rate)):
+            md = checkfield(md, 'fieldname', 'basalforcings.perturbation_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
+
         if 'MasstransportAnalysis' in analyses and not (solution == 'TransientSolution' and not md.transient.ismasstransport):
             md = checkfield(md, 'fieldname', 'basalforcings.groundedice_melting_rate', 'NaN', 1, 'Inf', 1, 'timeseries', 1)
@@ -94,9 +97,9 @@
         return md
     # }}}
-
     def marshall(self, prefix, md, fid):  # {{{
         yts = md.constants.yts
 
         WriteData(fid, prefix, 'name', 'md.basalforcings.model', 'data', 2, 'format', 'Integer')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'perturbation_melting_rate', 'name', 'md.basalforcings.perturbation_melting_rate', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'groundedice_melting_rate', 'name', 'md.basalforcings.groundedice_melting_rate', 'format', 'DoubleMat', 'mattype', 1, 'scale', 1. / yts, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
         WriteData(fid, prefix, 'object', self, 'fieldname', 'geothermalflux', 'name', 'md.basalforcings.geothermalflux', 'format', 'DoubleMat', 'mattype', 1, 'timeserieslength', md.mesh.numberofvertices + 1, 'yts', yts)
Index: /issm/trunk/src/m/classes/model.js
===================================================================
--- /issm/trunk/src/m/classes/model.js	(revision 24685)
+++ /issm/trunk/src/m/classes/model.js	(revision 24686)
@@ -22,4 +22,5 @@
 			console.log(sprintf("//19s: //-22s -- //s","rifts"           ,"[1x1 " + typeof(this.rifts) + "]","rifts properties"));
 			console.log(sprintf("//19s: //-22s -- //s","slr"             ,"[1x1 " + typeof(this.slr) + "]","slr forcings"));
+			console.log(sprintf("//19s: //-22s -- //s","dsl"             ,"[1x1 " + typeof(this.dsl) + "]","dynamic sea level"));
 			console.log(sprintf("//19s: //-22s -- //s","debug"           ,"[1x1 " + typeof(this.debug) + "]","debugging tools (valgrind, gprof)"));
 			console.log(sprintf("//19s: //-22s -- //s","verbose"         ,"[1x1 " + typeof(this.verbose) + "]","verbosity level in solve"));
@@ -66,4 +67,5 @@
 			this.rifts            = new rifts();
 			this.slr              = new slr();
+			this.dsl              = new dsl();
 
 			this.debug            = new debug();
@@ -739,4 +741,5 @@
 		this.rifts            = 0;
 		this.slr              = 0;
+		this.dsl              = 0;
 
 		this.debug            = 0;
Index: /issm/trunk/src/m/classes/model.m
===================================================================
--- /issm/trunk/src/m/classes/model.m	(revision 24685)
+++ /issm/trunk/src/m/classes/model.m	(revision 24686)
@@ -22,4 +22,5 @@
 		initialization   = 0;
 		rifts            = 0;
+		dsl              = 0;
 		slr              = 0;
 
@@ -156,5 +157,5 @@
 			%2019 Jan..
 			if isa(md.frontalforcings,'double');
-				if(~isnan(md.calving.meltingrate))
+				if(isprop(md.calving,'meltingrate') && ~isnan(md.calving.meltingrate))
 					disp('Warning: md.calving.meltingrate is now in md.frontalforcings');
 				end
@@ -176,4 +177,8 @@
 					end
 				end
+			end
+			%2019 Dec 16
+			if isa(md.dsl,'double') || isempty(md.dsl.compute_fingerprints)
+				md.dsl=dsl();
 			end
 		end% }}}
@@ -253,4 +258,5 @@
 			if ~isnan(md.inversion.vy_obs), md.inversion.vy_obs=project2d(md,md.inversion.vy_obs,md.mesh.numberoflayers); end;
 			if ~isnan(md.inversion.vel_obs), md.inversion.vel_obs=project2d(md,md.inversion.vel_obs,md.mesh.numberoflayers); end;
+			if ~isnan(md.inversion.thickness_obs), md.inversion.thickness_obs=project2d(md,md.inversion.thickness_obs,md.mesh.numberoflayers); end;
 			if ~isnan(md.inversion.cost_functions_coefficients), md.inversion.cost_functions_coefficients=project2d(md,md.inversion.cost_functions_coefficients,md.mesh.numberoflayers); end;
 			if numel(md.inversion.min_parameters)>1, md.inversion.min_parameters=project2d(md,md.inversion.min_parameters,md.mesh.numberoflayers); end;
@@ -293,5 +299,5 @@
 			md.stressbalance.referential=project2d(md,md.stressbalance.referential,md.mesh.numberoflayers);
 			md.stressbalance.loadingforce=project2d(md,md.stressbalance.loadingforce,md.mesh.numberoflayers);
-			md.masstransport.spcthickness=project2d(md,md.masstransport.spcthickness,md.mesh.numberoflayers);
+			if numel(md.masstransport.spcthickness)>1 md.masstransport.spcthickness=project2d(md,md.masstransport.spcthickness,md.mesh.numberoflayers); end
 			if numel(md.damage.spcdamage)>1, md.damage.spcdamage=project2d(md,md.damage.spcdamage,md.mesh.numberoflayers); end
 			if numel(md.levelset.spclevelset)>1, md.levelset.spclevelset=project2d(md,md.levelset.spclevelset,md.mesh.numberoflayers); end
@@ -728,4 +734,7 @@
 				error('extrude error message');
 			end
+			if numel(md.geometry.base)~=md.mesh.numberofvertices || numel(md.geometry.surface)~=md.mesh.numberofvertices
+				error('model has not been parameterized yet: base and/or surface not set');
+			end
 
 			%Extrude the mesh
@@ -870,4 +879,5 @@
 			md.hydrology = extrude(md.hydrology,md);
 			md.slr = extrude(md.slr,md);
+			md.dsl = extrude(md.dsl,md);
 
 			%connectivity
@@ -1190,4 +1200,5 @@
 			md.rifts            = rifts();
 			md.slr              = slr();
+			md.dsl              = dsl();
 			md.timestepping     = timestepping();
 			md.groundingline    = groundingline();
@@ -1370,4 +1381,5 @@
 			disp(sprintf('%19s: %-22s -- %s','rifts'           ,['[1x1 ' class(self.rifts) ']'],'rifts properties'));
 			disp(sprintf('%19s: %-22s -- %s','slr'             ,['[1x1 ' class(self.slr) ']'],'slr forcings'));
+			disp(sprintf('%19s: %-22s -- %s','dsl'             ,['[1x1 ' class(self.dsl) ']'],'dynamic sea-level '));
 			disp(sprintf('%19s: %-22s -- %s','debug'           ,['[1x1 ' class(self.debug) ']'],'debugging tools (valgrind, gprof)'));
 			disp(sprintf('%19s: %-22s -- %s','verbose'         ,['[1x1 ' class(self.verbose) ']'],'verbosity level in solve'));
Index: /issm/trunk/src/m/classes/model.py
===================================================================
--- /issm/trunk/src/m/classes/model.py	(revision 24685)
+++ /issm/trunk/src/m/classes/model.py	(revision 24686)
@@ -30,4 +30,5 @@
 from rifts import rifts
 from slr import slr
+from dsl import dsl
 from debug import debug
 from verbose import verbose
@@ -93,4 +94,5 @@
         self.initialization = initialization()
         self.rifts = rifts()
+        self.dsl = dsl()
         self.slr = slr()
 
@@ -143,4 +145,5 @@
                 'rifts',
                 'slr',
+                'dsl',
                 'debug',
                 'verbose',
@@ -189,4 +192,5 @@
         string = "%s\n%s" % (string, "%19s: % - 22s - -  %s" % ("rifts", "[%s, %s]" % ("1x1", obj.rifts.__class__.__name__), "rifts properties"))
         string = "%s\n%s" % (string, "%19s: % - 22s - -  %s" % ("slr", "[%s, %s]" % ("1x1", obj.slr.__class__.__name__), "slr forcings"))
+        string = "%s\n%s" % (string, "%19s: % - 22s - -  %s" % ("dsl", "[%s, %s]" % ("1x1", obj.dsl.__class__.__name__), "dynamic sea level"))
         string = "%s\n%s" % (string, "%19s: % - 22s - -  %s" % ("debug", "[%s, %s]" % ("1x1", obj.debug.__class__.__name__), "debugging tools (valgrind, gprof)"))
         string = "%s\n%s" % (string, "%19s: % - 22s - -  %s" % ("verbose", "[%s, %s]" % ("1x1", obj.verbose.__class__.__name__), "verbosity level in solve"))
@@ -741,4 +745,6 @@
         if not np.isnan(md.inversion.vel_obs).all():
             md.inversion.vel_obs = project2d(md, md.inversion.vel_obs, md.mesh.numberoflayers)
+        if not np.isnan(md.inversion.thickness_obs).all():
+            md.inversion.thickness_obs = project2d(md, md.inversion.thickness_obs, md.mesh.numberoflayers)
         if not np.isnan(md.inversion.cost_functions_coefficients).all():
             md.inversion.cost_functions_coefficients = project2d(md, md.inversion.cost_functions_coefficients, md.mesh.numberoflayers)
@@ -805,8 +811,11 @@
         md.stressbalance.referential = project2d(md, md.stressbalance.referential, md.mesh.numberoflayers)
         md.stressbalance.loadingforce = project2d(md, md.stressbalance.loadingforce, md.mesh.numberoflayers)
-        md.masstransport.spcthickness = project2d(md, md.masstransport.spcthickness, md.mesh.numberoflayers)
+        if np.size(md.masstransport.spcthickness) > 1:
+            md.masstransport.spcthickness = project2d(md, md.masstransport.spcthickness, md.mesh.numberoflayers)
+        if np.size(md.damage.spcdamage) > 1 and not np.isnan(md.damage.spcdamage).all():
+            md.damage.spcdamage = project2d(md, md.damage.spcdamage, md.mesh.numberoflayers - 1)
+        if np.size(md.levelset.spclevelset) > 1:
+            md.levelset.spclevelset = project2d(md, md.levelset.spclevelset, md.mesh.numberoflayers - 1)
         md.thermal.spctemperature = project2d(md, md.thermal.spctemperature, md.mesh.numberoflayers - 1)
-        if not np.isnan(md.damage.spcdamage).all():
-            md.damage.spcdamage = project2d(md, md.damage.spcdamage, md.mesh.numberoflayers - 1)
 
         #materials
Index: /issm/trunk/src/m/classes/qmu.m
===================================================================
--- /issm/trunk/src/m/classes/qmu.m	(revision 24685)
+++ /issm/trunk/src/m/classes/qmu.m	(revision 24686)
@@ -5,6 +5,7 @@
 
 classdef qmu
-	properties (SetAccess=public) 
+	properties (SetAccess=public)
 		isdakota                    = 0;
+		output                      = 0;
 		variables                   = struct();
 		responses                   = struct();
@@ -24,4 +25,34 @@
 		vertex_weight               = NaN;
 	end
+	methods (Static)
+		function self = loadobj(self) % {{{
+			% This function is directly called by matlab when a model object is
+			% loaded. If the input is a struct it is an old version of this class and
+			% old fields must be recovered (make sure they are in the deprecated
+			% model properties)
+
+			if verLessThan('matlab','7.9'),
+				disp('Warning: your matlab version is old and there is a risk that load does not work correctly');
+				disp('         if the model is not loaded correctly, rename temporarily loadobj so that matlab does not use it');
+
+				% This is a Matlab bug: all the fields of md have their default value
+				% Example of error message:
+				% Warning: Error loading an object of class 'model':
+				% Undefined function or method 'exist' for input arguments of type 'cell'
+				%
+				% This has been fixed in MATLAB 7.9 (R2009b) and later versions
+			end
+
+			if isstruct(self)
+				disp('Recovering qmu from older version');
+				objstruct = self;
+				self = structtoobj(qmu(),objstruct);
+
+				%2019 Dec 7th
+				if isfield(objstruct,'partition'),      self.vpartition     = objstruct.partition;       end;
+			end
+
+		end% }}}
+	end
 	methods
 		function self = extrude(self,md) % {{{
@@ -56,16 +87,16 @@
 				end
 				if md.cluster.np<=1,
-					md = checkmessage(md,['in parallel library mode, Dakota needs to run on at least 2 cpus, 1 cpu for the master, 1 cpu for the slave. Modify md.cluser.np accordingly.']);
-				end
-					
+					md = checkmessage(md,['in parallel library mode, Dakota needs to run on at least 2 cpus, 1 cpu for the master, 1 cpu for the slave. Modify md.cluster.np accordingly.']);
+				end
+
 				if self.params.processors_per_evaluation<1,
 					md = checkmessage(md,['in parallel library mode, Dakota needs to run at least one slave on one cpu (md.qmu.params.processors_per_evaluation >=1)!']);
 				end
-				if mod(md.cluster.np-1,self.params.processors_per_evaluation), 
+				if mod(md.cluster.np-1,self.params.processors_per_evaluation),
 					%md = checkmessage(md,['in parallel library mode, the requirement is for md.cluster.np = md.qmu.params.processors_per_evaluation * number_of_slaves, where number_of_slaves will automatically be determined by Dakota. Modify md.cluster.np accordingly']);
 				end
 			end
 			if ~isempty(md.qmu.vpartition) & ~any(isnan(md.qmu.vpartition)),
-				if size(md.qmu.vpartition,1)~=md.mesh.numberofvertices  
+				if size(md.qmu.vpartition,1)~=md.mesh.numberofvertices
 					md = checkmessage(md,['user supplied vertex partition for qmu analysis should have size md.mesh.numberofvertices x 1']);
 				end
@@ -78,5 +109,5 @@
 			end
 			if ~isempty(md.qmu.epartition) & ~any(isnan(md.qmu.epartition)),
-				if size(md.qmu.epartition,1)~=md.mesh.numberofelements, 
+				if size(md.qmu.epartition,1)~=md.mesh.numberofelements,
 					md = checkmessage(md,['user supplied element partition for qmu analysis should have size md.mesh.numberofelements x 1']);
 				end
@@ -97,4 +128,5 @@
 
 			fielddisplay(self,'isdakota','is qmu analysis activated?');
+			fielddisplay(self,'output','are we outputting ISSM results, default is 0');
 			for i=1:numel(self.variables)
 				disp(sprintf('         variables%s:  (arrays of each variable class)',...
@@ -125,5 +157,5 @@
 				end
 			end
-			fielddisplay(self,'numberofresponses','number of responses') 
+			fielddisplay(self,'numberofresponses','number of responses')
 			for i=1:numel(self.method);
 				if strcmp(class(self.method(i)),'dakota_method')
@@ -162,5 +194,5 @@
 			fielddisplay(self,'vpartition','user provided mesh partitioning (vertex based)');
 			fielddisplay(self,'epartition','user provided mesh partitioning (element based)');
-			fielddisplay(self,'numberofpartitions','number of partitions for semi-discrete qmu') 
+			fielddisplay(self,'numberofpartitions','number of partitions for semi-discrete qmu')
 			fielddisplay(self,'variabledescriptors','');
 			fielddisplay(self,'responsedescriptors','');
@@ -175,7 +207,8 @@
 		function marshall(self,prefix,md,fid) % {{{
 			WriteData(fid,prefix,'object',self,'fieldname','isdakota','format','Boolean');
-			if ~self.isdakota, 
+			WriteData(fid,prefix,'object',self,'fieldname','output','format','Boolean');
+			if ~self.isdakota,
 				WriteData(fid,prefix,'data',false,'name','md.qmu.mass_flux_segments_present','format','Boolean');
-				return; 
+				return;
 			end
 			WriteData(fid,prefix,'object',self,'fieldname','vpartition','format','DoubleMat','mattype',2);
@@ -185,14 +218,14 @@
 			WriteData(fid,prefix,'object',self,'fieldname','variabledescriptors','format','StringArray');
 			WriteData(fid,prefix,'object',self,'fieldname','responsedescriptors','format','StringArray');
-			if ~isempty(self.mass_flux_segments), 
+			if ~isempty(self.mass_flux_segments),
 				WriteData(fid,prefix,'data',self.mass_flux_segments,'name','md.qmu.mass_flux_segments','format','MatArray');
-				flag=true; 
-			else 
-				flag=false; 
+				flag=true;
+			else
+				flag=false;
 			end
 			WriteData(fid,prefix,'data',flag,'name','md.qmu.mass_flux_segments_present','format','Boolean');
 		end % }}}
 		function savemodeljs(self,fid,modelname) % {{{
-		
+
 			if self.isdakota,
 				error('qmu savemodeljs error message: not supported yet!');
Index: /issm/trunk/src/m/classes/qmu.py
===================================================================
--- /issm/trunk/src/m/classes/qmu.py	(revision 24685)
+++ /issm/trunk/src/m/classes/qmu.py	(revision 24686)
@@ -21,4 +21,5 @@
     def __init__(self):  # {{{
         self.isdakota = 0
+        self.output   = 0
         self.variables = OrderedStruct()
         self.responses = OrderedStruct()
@@ -46,4 +47,5 @@
 
         s += "%s\n" % fielddisplay(self, 'isdakota', 'is qmu analysis activated?')
+        s += "%s\n" % fielddisplay(self, 'output', 'are we outputting ISSM results, default is 0')
         maxlen = 0
         s += "         variables:  (arrays of each variable class)\n"
@@ -123,5 +125,4 @@
         return s
     # }}}
-
     def extrude(self, md):  # {{{
         self.vpartition = project3d(md, 'vector', np.transpose(self.vpartition), 'type', 'node')
@@ -129,9 +130,7 @@
         return self
     #}}}
-
     def setdefaultparameters(self):  # {{{
         return self
     #}}}
-
     def checkconsistency(self, md, solution, analyses):  # {{{
         #Early return
@@ -150,5 +149,5 @@
 
             if md.cluster.np <= 1:
-                md.checkmessage('in parallel library mode, Dakota needs to run on at least 2 cpus, 1 cpu for the master, 1 cpu for the slave. Modify md.cluser.np accordingly.')
+                md.checkmessage('in parallel library mode, Dakota needs to run on at least 2 cpus, 1 cpu for the master, 1 cpu for the slave. Modify md.cluster.np accordingly.')
 
             if self.params.processors_per_evaluation < 1:
@@ -179,7 +178,7 @@
         return md
     # }}}
-
     def marshall(self, prefix, md, fid):  # {{{
         WriteData(fid, prefix, 'object', self, 'fieldname', 'isdakota', 'format', 'Boolean')
+        WriteData(fid, prefix, 'object', self, 'fieldname', 'output', 'format', 'Boolean')
         if not self.isdakota:
             WriteData(fid, prefix, 'data', False, 'name', 'md.qmu.mass_flux_segments_present', 'format', 'Boolean')
Index: /issm/trunk/src/m/classes/qmu/continuous_design.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/continuous_design.m	(revision 24685)
+++ /issm/trunk/src/m/classes/qmu/continuous_design.m	(revision 24686)
@@ -211,4 +211,14 @@
 			scale=allequal(scale,1.);
 		end
+		function [abscissas] =prop_abscissas(hbu) % {{{
+            abscissas=[]; 
+        end % }}}
+        function [counts] =prop_counts(hbu) % {{{
+            counts=[]; 
+        end % }}}
+        function [pairs_per_variable] =prop_pairs_per_variable(hbu) % {{{
+			pairs_per_variable=[];
+        end % }}}
+
 	end
 
Index: /issm/trunk/src/m/classes/qmu/continuous_state.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/continuous_state.m	(revision 24685)
+++ /issm/trunk/src/m/classes/qmu/continuous_state.m	(revision 24686)
@@ -176,4 +176,15 @@
             scale=[];
         end
+		function [abscissas] =prop_abscissas(hbu) % {{{
+            abscissas=[]; 
+        end % }}}
+		function [counts] =prop_counts(hbu) % {{{
+            counts=[]; 
+        end % }}}
+        function [pairs_per_variable] =prop_pairs_per_variable(hbu) % {{{
+			pairs_per_variable=[];
+        end % }}}
+
+
     end
 
Index: /issm/trunk/src/m/classes/qmu/dakota_method/dakota_method.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/dakota_method/dakota_method.m	(revision 24686)
+++ /issm/trunk/src/m/classes/qmu/dakota_method/dakota_method.m	(revision 24686)
@@ -0,0 +1,897 @@
+%
+%  definition for the dakota_method class.
+%
+%  [dm]=dakota_method(method)
+%
+%  where the required input is:
+%    method       (char, beginning of method name)
+%
+%  and the output properties and defaults are:
+%    method       (char, full method name, '')
+%    type         (char, type of method, '')
+%    variables    (cell array, applicable variable types, {})
+%    lcspec       (cell array, linear constraint specs, {})
+%    responses    (cell array, applicable response types, {})
+%    ghspec       (cell array, gradient and hessian specs, {})
+%    params       (structure, method-dependent parameters, [])
+%
+%  this class is used to guide the writing of a dakota input
+%  file for the specified dakota_method.
+%
+%  note that zero arguments constructs a default instance; one
+%  argument of the class copies the instance; and one argument
+%  with enough characters to match a unique method constructs
+%  a new instance of that method.
+%
+%  "Copyright 2009, by the California Institute of Technology.
+%  ALL RIGHTS RESERVED. United States Government Sponsorship
+%  acknowledged. Any commercial use must be negotiated with
+%  the Office of Technology Transfer at the California Institute
+%  of Technology.  (J. Schiermeier, NTR 47078)
+%
+%  This software may be subject to U.S. export control laws.
+%  By accepting this  software, the user agrees to comply with
+%  all applicable U.S. export laws and regulations. User has the
+%  responsibility to obtain export licenses, or other export
+%  authority as may be required before exporting such information
+%  to foreign countries or providing access to foreign persons."
+%
+classdef dakota_method
+    properties (SetAccess=private)
+        method   ='';
+        type     ='';
+        variables={};
+        lcspec   ={};
+        responses={};
+        ghspec   ={};
+    end
+    properties
+        params   =struct();
+    end
+
+    methods
+        function [dm]=dakota_method(method)
+
+            switch nargin
+                case 0
+						 %  create a default object
+                case 1
+						 %  copy the object or create the object from the input
+                    if  (nargin == 1) && isa(method,'dakota_method')
+                        dm=method;
+                    else
+                        mlist={...
+									'dot_bfgs',...
+									'dot_frcg',...
+									'dot_mmfd',...
+									'dot_slp',...
+									'dot_sqp',...
+									'npsol_sqp',...
+									'conmin_frcg',...
+									'conmin_mfd',...
+									'optpp_cg',...
+									'optpp_q_newton',...
+									'optpp_fd_newton',...
+									'optpp_newton',...
+									'optpp_pds',...
+									'asynch_pattern_search',...
+									'coliny_cobyla',...
+									'coliny_direct',...
+									'coliny_ea',...
+									'coliny_pattern_search',...
+									'coliny_solis_wets',...
+									'ncsu_direct',...
+									'surrogate_based_local',...
+									'surrogate_based_global',...
+									'moga',...
+									'soga',...
+									'nl2sol',...
+									'nlssol_sqp',...
+									'optpp_g_newton',...
+									'nond_sampling',...
+									'nond_local_reliability',...
+									'nond_global_reliability',...
+									'nond_polynomial_chaos',...
+									'nond_stoch_collocation',...
+									'nond_evidence',...
+									'dace',...
+									'fsu_quasi_mc',...
+									'fsu_cvt',...
+									'vector_parameter_study',...
+									'list_parameter_study',...
+									'centered_parameter_study',...
+									'multidim_parameter_study',...
+									'bayes_calibration',...
+									'polynomial_chaos',...
+                            };
+
+                        mlist2={};
+                        for i=1:length(mlist)
+                            if strncmpi(method,mlist{i},length(method))
+                                mlist2(end+1)=mlist(i);
+                            end
+                        end
+
+								%  check for a unique match in the list of methods
+                        switch length(mlist2)
+                            case 0
+                                error(['Unrecognized method: ''' method '''']);
+                            case 1
+                                dm.method=mlist2{1};
+                            otherwise
+                                error('Non-unique method: ''%s'' matches %s.',...
+                                    method,string_cell(mlist2));
+                        end
+
+								%  assign the default values for the method
+                        switch dm.method
+                            case {'dot_bfgs','dot_frcg'}
+                                dm.type     ='dot';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.constraint_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                                dm.params.optimization_type='minimize';
+                            case {'dot_mmfd',...
+                                  'dot_slp',...
+                                  'dot_sqp'}
+                                dm.type     ='dot';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.constraint_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                                dm.params.optimization_type='minimize';
+
+                            case {'npsol_sqp'}
+                                dm.type     ='npsol';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.constraint_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                                dm.params.verify_level=-1;
+                                dm.params.function_precision=1.e-10;
+                                dm.params.linesearch_tolerance=0.9;
+
+                            case {'conmin_frcg'}
+                                dm.type     ='conmin';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.constraint_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                            case {'conmin_mfd'}
+                                dm.type     ='conmin';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.constraint_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+
+                            case {'optpp_cg'}
+                                dm.type     ='optpp';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                                dm.params.max_step=1000.;
+                                dm.params.gradient_tolerance=1.e-4;
+                            case {'optpp_q_newton',...
+                                  'optpp_fd_newton',...
+                                  'optpp_newton'}
+                                dm.type     ='optpp';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                                dm.params.value_based_line_search=false;
+                                dm.params.gradient_based_line_search=false;
+                                dm.params.trust_region=false;
+                                dm.params.tr_pds=false;
+                                dm.params.max_step=1000.;
+                                dm.params.gradient_tolerance=1.e-4;
+                                dm.params.merit_function='argaez_tapia';
+                                dm.params.central_path=dm.params.merit_function;
+                                dm.params.steplength_to_boundary=0.99995;
+                                dm.params.centering_parameter=0.2;
+                            case {'optpp_pds'}
+                                dm.type     ='optpp';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                                dm.params.search_scheme_size=32;
+
+                            case {'asynch_pattern_search'}
+                                dm.type     ='apps';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_function_evaluations=false;
+                                dm.params.constraint_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.initial_delta=1.0;
+                                dm.params.threshold_delta=0.01;
+                                dm.params.contraction_factor=0.5;
+                                dm.params.solution_target=false;
+                                dm.params.synchronization='nonblocking';
+                                dm.params.merit_function='merit2_smooth';
+                                dm.params.constraint_penalty=1.0;
+                                dm.params.smoothing_factor=1.0;
+
+                            case {'coliny_cobyla'}
+                                dm.type     ='coliny';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.show_misc_options=false;
+                                dm.params.misc_options={};
+                                dm.params.solution_accuracy=-Inf;
+                                dm.params.initial_delta=[];
+                                dm.params.threshold_delta=[];
+                            case {'coliny_direct'}
+                                dm.type     ='coliny';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.show_misc_options=false;
+                                dm.params.misc_options={};
+                                dm.params.solution_accuracy=-Inf;
+                                dm.params.division='major_dimension';
+                                dm.params.global_balance_parameter=0.0;
+                                dm.params.local_balance_parameter=1.e-8;
+                                dm.params.max_boxsize_limit=0.0;
+                                dm.params.min_boxsize_limit=0.0001;
+                                dm.params.constraint_penalty=1000.0;
+                            case {'coliny_ea'}
+                                dm.type     ='coliny';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.show_misc_options=false;
+                                dm.params.misc_options={};
+                                dm.params.solution_accuracy=-Inf;
+                                dm.params.seed=false;
+                                dm.params.population_size=50;
+                                dm.params.initialization_type='unique_random';
+                                dm.params.fitness_type='linear_rank';
+                                dm.params.replacement_type='elitist';
+                                dm.params.random=[];
+                                dm.params.chc=[];
+                                dm.params.elitist=[];
+                                dm.params.new_solutions_generated='population_size - replacement_size';
+                                dm.params.crossover_type='two_point';
+                                dm.params.crossover_rate=0.8;
+                                dm.params.mutation_type='offset_normal';
+                                dm.params.mutation_scale=0.1;
+                                dm.params.mutation_range=1;
+                                dm.params.dimension_ratio=1.0;
+                                dm.params.mutation_rate=1.0;
+                                dm.params.non_adaptive=false;
+                            case {'coliny_pattern_search'}
+                                dm.type     ='coliny';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.show_misc_options=false;
+                                dm.params.misc_options={};
+                                dm.params.solution_accuracy=-Inf;
+                                dm.params.stochastic=false;
+                                dm.params.seed=false;
+                                dm.params.initial_delta=[];
+                                dm.params.threshold_delta=[];
+                                dm.params.constraint_penalty=1.0;
+                                dm.params.constant_penalty=false;
+                                dm.params.pattern_basis='coordinate';
+                                dm.params.total_pattern_size=false;
+                                dm.params.no_expansion=false;
+                                dm.params.expand_after_success=1;
+                                dm.params.contraction_factor=0.5;
+                                dm.params.synchronization='nonblocking';
+                                dm.params.exploratory_moves='basic_pattern';
+                            case {'coliny_solis_wets'}
+                                dm.type     ='coliny';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.show_misc_options=false;
+                                dm.params.misc_options={};
+                                dm.params.solution_accuracy=-Inf;
+                                dm.params.seed=false;
+                                dm.params.initial_delta=[];
+                                dm.params.threshold_delta=[];
+                                dm.params.no_expansion=false;
+                                dm.params.expand_after_success=5;
+                                dm.params.contract_after_failure=3;
+                                dm.params.contraction_factor=0.5;
+                                dm.params.constraint_penalty=1.0;
+                                dm.params.constant_penalty=false;
+
+                            case {'ncsu_direct'}
+                                dm.type     ='ncsu';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};  %  ?
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};  %  ?
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.scaling=false;
+                                dm.params.solution_accuracy=0.;
+                                dm.params.min_boxsize_limit=1.e-8;
+                                dm.params.vol_boxsize_limit=1.e-8;
+
+%                             case {'surrogate_based_local',...
+%                                   'surrogate_based_global'}
+
+                            case {'moga'}
+                                dm.type     ='jega';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.seed=false;
+                                dm.params.log_file='JEGAGlobal.log';
+                                dm.params.population_size=50;
+                                dm.params.print_each_pop=false;
+%                               according to documentation, uses method-independent control
+%                               dm.params.output='normal';
+                                dm.params.initialization_type='unique_random';
+                                dm.params.mutation_type='replace_uniform';
+                                dm.params.mutation_scale=0.15;
+                                dm.params.mutation_rate=0.08;
+                                dm.params.replacement_type='';
+                                dm.params.below_limit=6;
+                                dm.params.shrinkage_percentage=0.9;
+                                dm.params.crossover_type='shuffle_random';
+                                dm.params.multi_point_binary=[];
+                                dm.params.multi_point_parameterized_binary=[];
+                                dm.params.multi_point_real=[];
+                                dm.params.shuffle_random=[];
+                                dm.params.num_parents=2;
+                                dm.params.num_offspring=2;
+                                dm.params.crossover_rate=0.8;
+                                dm.params.fitness_type='';
+                                dm.params.niching_type=false;
+                                dm.params.radial=[0.01];
+                                dm.params.distance=[0.01];
+                                dm.params.metric_tracker=false;
+                                dm.params.percent_change=0.1;
+                                dm.params.num_generations=10;
+                                dm.params.postprocessor_type=false;
+                                dm.params.orthogonal_distance=[0.01];
+                            case {'soga'}
+                                dm.type     ='jega';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'objective_function',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.seed=false;
+                                dm.params.log_file='JEGAGlobal.log';
+                                dm.params.population_size=50;
+                                dm.params.print_each_pop=false;
+                                dm.params.output='normal';
+                                dm.params.initialization_type='unique_random';
+                                dm.params.mutation_type='replace_uniform';
+                                dm.params.mutation_scale=0.15;
+                                dm.params.mutation_rate=0.08;
+                                dm.params.replacement_type='';
+                                dm.params.below_limit=6;
+                                dm.params.shrinkage_percentage=0.9;
+                                dm.params.crossover_type='shuffle_random';
+                                dm.params.multi_point_binary=[];
+                                dm.params.multi_point_parameterized_binary=[];
+                                dm.params.multi_point_real=[];
+                                dm.params.shuffle_random=[];
+                                dm.params.num_parents=2;
+                                dm.params.num_offspring=2;
+                                dm.params.crossover_rate=0.8;
+                                dm.params.fitness_type='merit_function';
+                                dm.params.constraint_penalty=1.0;
+                                dm.params.replacement_type='';
+                                dm.params.convergence_type=false;
+                                dm.params.num_generations=10;
+                                dm.params.percent_change=0.1;
+
+                            case {'nl2sol'}
+                                dm.type     ='lsq';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'least_squares_term'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.scaling=false;
+                                dm.params.function_precision=1.e-10;
+                                dm.params.absolute_conv_tol=-1.;
+                                dm.params.x_conv_tol=-1.;
+                                dm.params.singular_conv_tol=-1.;
+                                dm.params.singular_radius=-1.;
+                                dm.params.false_conv_tol=-1.;
+                                dm.params.initial_trust_radius=-1.;
+                                dm.params.covariance=0;
+                                dm.params.regression_stressbalances=false;
+                            case {'nlssol_sqp'}
+                                dm.type     ='lsq';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'least_squares_term',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.constraint_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                                dm.params.verify_level=-1;
+                                dm.params.function_precision=1.e-10;
+                                dm.params.linesearch_tolerance=0.9;
+                            case {'optpp_g_newton'}
+                                dm.type     ='lsq';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={'linear_inequality_constraint',...
+                                              'linear_equality_constraint'};
+                                dm.responses={'least_squares_term',...
+                                              'nonlinear_inequality_constraint',...
+                                              'nonlinear_equality_constraint'};
+                                dm.ghspec   ={'grad'};
+                                dm.params.max_iterations=false;
+                                dm.params.max_function_evaluations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.output=false;
+                                dm.params.speculative=false;
+                                dm.params.scaling=false;
+                                dm.params.value_based_line_search=false;
+                                dm.params.gradient_based_line_search=false;
+                                dm.params.trust_region=false;
+                                dm.params.tr_pds=false;
+                                dm.params.max_step=1000.;
+                                dm.params.gradient_tolerance=1.e-4;
+                                dm.params.merit_function='argaez_tapia';
+                                dm.params.central_path=dm.params.merit_function;
+                                dm.params.steplength_to_boundary=0.99995;
+                                dm.params.centering_parameter=0.2;
+
+                            case {'nond_sampling'}
+                                dm.type     ='nond';
+                                dm.variables={'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'histogram_bin_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'response_function'};
+                                dm.ghspec   ={};
+%                               not documented, but apparently works
+                                dm.params.output=false;
+                                dm.params.seed=false;
+                                dm.params.fixed_seed=false;
+                                dm.params.rng=false;
+                                dm.params.samples=false;
+                                dm.params.sample_type='lhs';
+                                dm.params.all_variables=false;
+                                dm.params.variance_based_decomp=false;
+                                dm.params.previous_samples=0;
+                            case {'nond_local_reliability'}
+                                dm.type     ='nond';
+                                dm.variables={'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'response_function'};
+                                dm.ghspec   ={'grad'};
+%                               not documented, but may work
+                                dm.params.output=false;
+                                dm.params.max_iterations=false;
+                                dm.params.convergence_tolerance=false;
+                                dm.params.mpp_search=false;
+                                dm.params.sqp=false;
+                                dm.params.nip=false;
+                                dm.params.integration='first_order';
+                                dm.params.refinement=false;
+                                dm.params.samples=0;
+                                dm.params.seed=false;
+                            case {'nond_global_reliability'}
+                                dm.type     ='nond';
+                                dm.variables={'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'response_function'};
+                                dm.ghspec   ={'grad'};
+%                               not documented, but may work
+                                dm.params.output=false;
+                                dm.params.x_gaussian_process=false;
+                                dm.params.u_gaussian_process=false;
+                                dm.params.all_variables=false;
+                                dm.params.seed=false;
+                            case {'nond_polynomial_chaos'}
+                                dm.type     ='nond';
+                                dm.variables={'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'response_function'};
+                                dm.ghspec   ={'grad'};
+%                               not documented, but may work
+                                dm.params.output=false;
+                                dm.params.expansion_order=[];
+                                dm.params.expansion_terms=[];
+                                dm.params.quadrature_order=[];
+                                dm.params.sparse_grid_level=[];
+                                dm.params.expansion_samples=[];
+                                dm.params.incremental_lhs=false;
+                                dm.params.collocation_points=[];
+                                dm.params.collocation_ratio=[];
+                                dm.params.reuse_samples=false;
+                                dm.params.expansion_import_file='';
+                                dm.params.seed=false;
+                                dm.params.fixed_seed=false;
+                                dm.params.samples=0;
+                                dm.params.sample_type='lhs';
+                                dm.params.all_variables=false;
+                            case {'nond_stoch_collocation'}
+                                dm.type     ='nond';
+                                dm.variables={'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'response_function'};
+                                dm.ghspec   ={'grad'};
+%                               not documented, but may work
+                                dm.params.output=false;
+                                dm.params.quadrature_order=[];
+                                dm.params.sparse_grid_level=[];
+                                dm.params.seed=false;
+                                dm.params.fixed_seed=false;
+                                dm.params.samples=0;
+                                dm.params.sample_type='lhs';
+                                dm.params.all_variables=false;
+                            case {'nond_evidence'}
+                                dm.type     ='nond';
+                                dm.variables={'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'response_function'};
+                                dm.ghspec   ={'grad'};
+%                               not documented, but may work
+                                dm.params.output=false;
+                                dm.params.seed=false;
+                                dm.params.samples=10000;
+
+                            case {'dace'}
+                                dm.type     ='dace';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'response_function'};
+                                dm.ghspec   ={};
+                                dm.params.grid=false;
+                                dm.params.random=false;
+                                dm.params.oas=false;
+                                dm.params.lhs=false;
+                                dm.params.oa_lhs=false;
+                                dm.params.box_behnken=false;
+                                dm.params.central_composite=false;
+                                dm.params.seed=false;
+                                dm.params.fixed_seed=false;
+                                dm.params.samples=false;
+                                dm.params.symbols=false;
+                                dm.params.quality_metrics=false;
+                                dm.params.variance_based_decomp=false;
+                            case {'fsu_quasi_mc'}
+                                dm.type     ='dace';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'response_function'};
+                                dm.ghspec   ={};
+                                dm.params.halton=false;
+                                dm.params.hammersley=false;
+                                dm.params.samples=0;
+                                dm.params.sequence_start=[0];
+                                dm.params.sequence_leap=[1];
+                                dm.params.prime_base=false;
+                                dm.params.fixed_sequence=false;
+                                dm.params.latinize=false;
+                                dm.params.variance_based_decomp=false;
+                                dm.params.quality_metrics=false;
+                            case {'fsu_cvt'}
+                                dm.type     ='dace';
+                                dm.variables={'continuous_design',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'response_function'};
+                                dm.ghspec   ={};
+                                dm.params.seed=false;
+                                dm.params.fixed_seed=false;
+                                dm.params.samples=0;
+                                dm.params.num_trials=10000;
+                                dm.params.trial_type='random';
+                                dm.params.latinize=false;
+                                dm.params.variance_based_decomp=false;
+                                dm.params.quality_metrics=false;
+
+                            case {'vector_parameter_study'}
+                                dm.type     ='param';
+                                dm.variables={'continuous_design',...
+                                              'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'response_function'};
+                                dm.ghspec   ={};
+                                dm.params.output=false;
+                                dm.params.final_point=[];
+                                dm.params.step_length=[];
+                                dm.params.num_steps=[];
+                                dm.params.step_vector=[];
+                                dm.params.num_steps=[];
+                            case {'list_parameter_study'}
+                                dm.type     ='param';
+                                dm.variables={'continuous_design',...
+                                              'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'response_function'};
+                                dm.ghspec   ={};
+                                dm.params.output=false;
+                                dm.params.list_of_points=[];
+                            case {'centered_parameter_study'}
+                                dm.type     ='param';
+                                dm.variables={'continuous_design',...
+                                              'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'response_function'};
+                                dm.ghspec   ={};
+                                dm.params.output=false;
+                                dm.params.percent_delta=[];
+                                dm.params.deltas_per_variable=[];
+                            case {'multidim_parameter_study'}
+                                dm.type     ='param';
+                                dm.variables={'continuous_design',...
+                                              'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'response_function'};
+                                dm.ghspec   ={};
+                                dm.params.output=false;
+                                dm.params.partitions=[];
+                            case {'bayes_calibration'}
+                                dm.type     ='bayes';
+                                dm.variables={'continuous_design',...
+                                              'normal_uncertain',...
+                                              'uniform_uncertain',...
+                                              'continuous_state'};
+                                dm.lcspec   ={};
+                                dm.responses={'objective_function',...
+                                              'response_function',...
+															'calibration_function'};
+                                dm.ghspec   ={};
+                                dm.params.queso=false;
+										  dm.params.dream=false;
+										  dm.params.gpmsa=false;
+                                dm.params.samples=0;
+										  dm.params.seed=false;
+										  dm.params.output=false;
+										  dm.params.metropolis_hastings=false;
+										  dm.params.proposal_covariance=false;
+										  dm.params.diagonal=false;
+										  dm.params.values=[];
+									  case {'polynomial_chaos'}
+										  dm.type     ='polynomial_chaos';
+										  dm.params.sparse_grid_level = 3;
+										  dm.params.dimension_adaptive = 'whoops';
+										  dm.responses={'objective_function',...
+											  'response_function',...
+											  'calibration_function'};
+										  dm.variables={'normal_uncertain',...
+											  'uniform_uncertain',...
+											  'continuous_state'};
+                            otherwise
+                                error('Unimplemented method: ''%s''.',dm.method);
+                        end
+
+                    end
+
+%  if more than one argument, issue warning
+
+                otherwise
+                    warning('dakota_method:extra_arg',...
+                        'Extra arguments for object of class ''%s''.',...
+                        class(dm));
+            end
+
+        end
+
+        function []=disp(dm)
+
+%  display the dakota_method object (or each element of an object array)
+
+            for i=1:numel(dm)
+                disp(sprintf('\nclass ''%s'' object ''%s%s'' = \n',...
+                    class(dm),inputname(1),string_dim(dm,i)));
+                disp(sprintf('       method: ''%s'''  ,dm(i).method));
+                disp(sprintf('         type: ''%s'''  ,dm(i).type));
+                disp(sprintf('    variables: %s'      ,string_cell(dm(i).variables)));
+                disp(sprintf('       lcspec: %s'      ,string_cell(dm(i).lcspec)));
+                disp(sprintf('    responses: %s'      ,string_cell(dm(i).responses)));
+                disp(sprintf('       ghspec: %s\n'    ,string_cell(dm(i).ghspec)));
+
+%  display the method-dependent parameters, names padded for column alignment
+
+                fnames=fieldnames(dm(i).params); % parameter names for this method
+                maxlen=0; % length of longest parameter name, sets the pad width below
+                for j=1:numel(fnames)
+                    maxlen=max(maxlen,length(fnames{j}));
+                end
+
+                for j=1:numel(fnames)
+                    disp(sprintf(['       params.%-' num2str(maxlen+1) 's: %s'],...
+                        fnames{j},any2str(dm(i).params.(fnames{j}))));
+                end
+            end
+
+        end
+    end
+end
Index: /issm/trunk/src/m/classes/qmu/dakota_method/dakota_method.py
===================================================================
--- /issm/trunk/src/m/classes/qmu/dakota_method/dakota_method.py	(revision 24686)
+++ /issm/trunk/src/m/classes/qmu/dakota_method/dakota_method.py	(revision 24686)
@@ -0,0 +1,906 @@
+#move this later
+from helpers import *
+
+from MatlabFuncs import *
+import numpy as np
+
+
+class dakota_method(object):
+    '''
+  definition for the dakota_method class.
+
+  [dm] = dakota_method(method)
+
+  where the required input is:
+    method       (char, beginning of method name)
+
+  and the output properties and defaults are:
+    method       (char, full method name, '')
+    type         (char, type of method, '')
+    variables    (cell array, applicable variable types, [])
+    lcspec       (cell array, linear constraint specs, [])
+    responses    (cell array, applicable response types, [])
+    ghspec       (cell array, gradient and hessian specs, [])
+    params       (structure, method-dependent parameters, [])
+
+  this class is used to guide the writing of a dakota input
+  file for the specified dakota_method.
+
+  note that zero arguments constructs a default instance; one
+  argument of the class copies the instance; and one argument
+  with enough characters to match a unique method constructs
+  a new instance of that method.
+
+  "Copyright 2009, by the California Institute of Technology.
+  ALL RIGHTS RESERVED. United States Government Sponsorship
+  acknowledged. Any commercial use must be negotiated with
+  the Office of Technology Transfer at the California Institute
+  of Technology.  (J. Schiermeier, NTR 47078)
+
+  This software may be subject to U.S. export control laws.
+  By accepting this  software, the user agrees to comply with
+  all applicable U.S. export laws and regulations. User has the
+  responsibility to obtain export licenses, or other export
+  authority as may be required before exporting such information
+  to foreign countries or providing access to foreign persons."
+    '''
+
+    def __init__(self, *args):  # construct an empty instance; fields are populated by dakota_method() below
+        self.method = ''  # full method name (e.g. 'nl2sol'), chosen from mlist
+        self.type = ''  # type of method (e.g. 'dot', 'nond', 'param')
+        self.variables = []  # applicable variable types (cell-array equivalent)
+        self.lcspec = []  # linear constraint specs
+        self.responses = []  # applicable response types
+        self.ghspec = []  # gradient and hessian specs
+    # method-dependent parameters; struct() presumably comes from 'from helpers import *' -- TODO confirm
+        self.params = struct()
+
+    @staticmethod
+    def dakota_method(*args):
+        dm = dakota_method()
+    #  return a default object
+        if len(args) == 0:
+            return dm
+
+    #  copy the object or create the object from the input
+        elif len(args) == 1:
+            method = args[0]
+
+            #given argument was a method, copy it
+            if isinstance(method, dakota_method):
+                #dm = method
+                object = method
+                for field in object.keys():
+                    if field in vars(dm):
+                        setattr(dm, field, object[field])
+                return dm
+
+    #given argument was a way of constructing a method
+            else:
+                mlist = ['dot_bfgs',
+                         'dot_frcg',
+                         'dot_mmfd',
+                         'dot_slp',
+                         'dot_sqp',
+                         'npsol_sqp',
+                         'conmin_frcg',
+                         'conmin_mfd',
+                         'optpp_cg',
+                         'optpp_q_newton',
+                         'optpp_fd_newton',
+                         'optpp_newton',
+                         'optpp_pds',
+                         'asynch_pattern_search',
+                         'coliny_cobyla',
+                         'coliny_direct',
+                         'coliny_ea',
+                         'coliny_pattern_search',
+                         'coliny_solis_wets',
+                         'ncsu_direct',
+                         'surrogate_based_local',
+                         'surrogate_based_global',
+                         'moga',
+                         'soga',
+                         'nl2sol',
+                         'nlssol_sqp',
+                         'optpp_g_newton',
+                         'nond_sampling',
+                         'nond_local_reliability',
+                         'nond_global_reliability',
+                         'nond_polynomial_chaos',
+                         'nond_stoch_collocation',
+                         'nond_evidence',
+                         'dace',
+                         'fsu_quasi_mc',
+                         'fsu_cvt',
+                         'vector_parameter_study',
+                         'list_parameter_study',
+                         'centered_parameter_study',
+                         'multidim_parameter_study',
+                         'bayes_calibration']
+
+                mlist2 = []
+                for i in range(len(mlist)):
+                    if strncmpi(method, mlist[i], len(method)):
+                        mlist2.append(mlist[i])
+    #  check for a unique match in the list of methods
+                length = len(mlist2)
+                if length == 0:
+                    raise RuntimeError('Unrecognized method: ' + str(method) + '.')
+                elif length == 1:
+                    dm.method = mlist2[0]
+                else:
+                    raise RuntimeError('Non - unique method: ' + str(method) + ' matches ' + string_cell(mlist2))
+
+    #  assign the default values for the method
+    # switch dm.method
+                if dm.method in ['dot_bfgs', 'dot_frcg']:
+                    dm.type = 'dot'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.constraint_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+                    dm.params.optimization_type = 'minimize'
+
+                elif dm.method in ['dot_mmfd', 'dot_slp', 'dot_sqp']:
+                    dm.type = 'dot'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.constraint_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+                    dm.params.optimization_type = 'minimize'
+
+                elif dm.method == 'npsol_sqp':
+                    dm.type = 'npsol'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.constraint_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+                    dm.params.verify_level = -1
+                    dm.params.function_precision = 1.0e-10
+                    dm.params.linesearch_tolerance = 0.9
+
+                elif dm.method == 'conmin_frcg':
+                    dm.type = 'conmin'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.constraint_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+
+                elif dm.method == 'conmin_mfd':
+                    dm.type = 'conmin'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.constraint_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+
+                elif dm.method == 'optpp_cg':
+                    dm.type = 'optpp'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+                    dm.params.max_step = 1000.
+                    dm.params.gradient_tolerance = 1.0e-4
+
+                elif dm.method in ['optpp_q_newton',
+                                   'optpp_fd_newton',
+                                   'optpp_newton']:
+                    dm.type = 'optpp'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+                    dm.params.value_based_line_search = False
+                    dm.params.gradient_based_line_search = False
+                    dm.params.trust_region = False
+                    dm.params.tr_pds = False
+                    dm.params.max_step = 1000.
+                    dm.params.gradient_tolerance = 1.0e-4
+                    dm.params.merit_function = 'argaez_tapia'
+                    dm.params.central_path = dm.params.merit_function
+                    dm.params.steplength_to_boundary = 0.99995
+                    dm.params.centering_parameter = 0.2
+
+                elif dm.method == 'optpp_pds':
+                    dm.type = 'optpp'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+                    dm.params.search_scheme_size = 32
+
+                elif dm.method == 'asynch_pattern_search':
+                    dm.type = 'apps'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_function_evaluations = False
+                    dm.params.constraint_tolerance = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.initial_delta = 1.0
+                    dm.params.threshold_delta = 0.01
+                    dm.params.contraction_factor = 0.5
+                    dm.params.solution_target = False
+                    dm.params.synchronization = 'nonblocking'
+                    dm.params.merit_function = 'merit2_smooth'
+                    dm.params.constraint_penalty = 1.0
+                    dm.params.smoothing_factor = 1.0
+
+                elif dm.method == 'coliny_cobyla':
+                    dm.type = 'coliny'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.show_misc_options = False
+                    dm.params.misc_options = []
+                    dm.params.solution_accuracy = -np.inf
+                    dm.params.initial_delta = []
+                    dm.params.threshold_delta = []
+
+                elif dm.method == 'coliny_direct':
+                    dm.type = 'coliny'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.show_misc_options = False
+                    dm.params.misc_options = []
+                    dm.params.solution_accuracy = -np.inf
+                    dm.params.division = 'major_dimension'
+                    dm.params.global_balance_parameter = 0.0
+                    dm.params.local_balance_parameter = 1.0e-8
+                    dm.params.max_boxsize_limit = 0.0
+                    dm.params.min_boxsize_limit = 0.0001
+                    dm.params.constraint_penalty = 1000.0
+
+                elif dm.method == 'coliny_ea':
+                    dm.type = 'coliny'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.show_misc_options = False
+                    dm.params.misc_options = []
+                    dm.params.solution_accuracy = -np.inf
+                    dm.params.seed = False
+                    dm.params.population_size = 50
+                    dm.params.initialization_type = 'unique_random'
+                    dm.params.fitness_type = 'linear_rank'
+                    dm.params.replacement_type = 'elitist'
+                    dm.params.random = []
+                    dm.params.chc = []
+                    dm.params.elitist = []
+                    dm.params.new_solutions_generated = 'population_size-replacement_size'
+                    dm.params.crossover_type = 'two_point'
+                    dm.params.crossover_rate = 0.8
+                    dm.params.mutation_type = 'offset_normal'
+                    dm.params.mutation_scale = 0.1
+                    dm.params.mutation_range = 1
+                    dm.params.dimension_ratio = 1.0
+                    dm.params.mutation_rate = 1.0
+                    dm.params.non_adaptive = False
+
+                elif dm.method == 'coliny_pattern_search':
+                    dm.type = 'coliny'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.show_misc_options = False
+                    dm.params.misc_options = []
+                    dm.params.solution_accuracy = -np.inf
+                    dm.params.stochastic = False
+                    dm.params.seed = False
+                    dm.params.initial_delta = []
+                    dm.params.threshold_delta = []
+                    dm.params.constraint_penalty = 1.0
+                    dm.params.constant_penalty = False
+                    dm.params.pattern_basis = 'coordinate'
+                    dm.params.total_pattern_size = False
+                    dm.params.no_expansion = False
+                    dm.params.expand_after_success = 1
+                    dm.params.contraction_factor = 0.5
+                    dm.params.synchronization = 'nonblocking'
+                    dm.params.exploratory_moves = 'basic_pattern'
+
+                elif dm.method == 'coliny_solis_wets':
+                    dm.type = 'coliny'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.show_misc_options = False
+                    dm.params.misc_options = []
+                    dm.params.solution_accuracy = -np.inf
+                    dm.params.seed = False
+                    dm.params.initial_delta = []
+                    dm.params.threshold_delta = []
+                    dm.params.no_expansion = False
+                    dm.params.expand_after_success = 5
+                    dm.params.contract_after_failure = 3
+                    dm.params.contraction_factor = 0.5
+                    dm.params.constraint_penalty = 1.0
+                    dm.params.constant_penalty = False
+
+                elif dm.method == 'ncsu_direct':
+                    dm.type = 'ncsu'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']  # TODO: confirm ncsu_direct supports linear constraints
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']  # TODO: confirm ncsu_direct supports nonlinear constraints
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.scaling = False
+                    dm.params.solution_accuracy = 0.
+                    dm.params.min_boxsize_limit = 1.0e-8
+                    dm.params.vol_boxsize_limit = 1.0e-8
+
+    #if dm.method in ['surrogate_based_local',
+    #'surrogate_based_global']:
+
+                elif dm.method == 'moga':
+                    dm.type = 'jega'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.seed = False
+                    dm.params.log_file = 'JEGAGlobal.log'
+                    dm.params.population_size = 50
+                    dm.params.print_each_pop = False
+    #according to documentation, uses method-independent control
+    #dm.params.output = 'normal'
+                    dm.params.initialization_type = 'unique_random'
+                    dm.params.mutation_type = 'replace_uniform'
+                    dm.params.mutation_scale = 0.15
+                    dm.params.mutation_rate = 0.08
+                    dm.params.replacement_type = ''
+                    dm.params.below_limit = 6
+                    dm.params.shrinkage_percentage = 0.9
+                    dm.params.crossover_type = 'shuffle_random'
+                    dm.params.multi_point_binary = []
+                    dm.params.multi_point_parameterized_binary = []
+                    dm.params.multi_point_real = []
+                    dm.params.shuffle_random = []
+                    dm.params.num_parents = 2
+                    dm.params.num_offspring = 2
+                    dm.params.crossover_rate = 0.8
+                    dm.params.fitness_type = ''
+                    dm.params.niching_type = False
+                    dm.params.radial = [0.01]
+                    dm.params.distance = [0.01]
+                    dm.params.metric_tracker = False
+                    dm.params.percent_change = 0.1
+                    dm.params.num_generations = 10
+                    dm.params.postprocessor_type = False
+                    dm.params.orthogonal_distance = [0.01]
+
+                elif dm.method == 'soga':
+                    dm.type = 'jega'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['objective_function',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.seed = False
+                    dm.params.log_file = 'JEGAGlobal.log'
+                    dm.params.population_size = 50
+                    dm.params.print_each_pop = False
+                    dm.params.output = 'normal'
+                    dm.params.initialization_type = 'unique_random'
+                    dm.params.mutation_type = 'replace_uniform'
+                    dm.params.mutation_scale = 0.15
+                    dm.params.mutation_rate = 0.08
+                    dm.params.replacement_type = ''
+                    dm.params.below_limit = 6
+                    dm.params.shrinkage_percentage = 0.9
+                    dm.params.crossover_type = 'shuffle_random'
+                    dm.params.multi_point_binary = []
+                    dm.params.multi_point_parameterized_binary = []
+                    dm.params.multi_point_real = []
+                    dm.params.shuffle_random = []
+                    dm.params.num_parents = 2
+                    dm.params.num_offspring = 2
+                    dm.params.crossover_rate = 0.8
+                    dm.params.fitness_type = 'merit_function'
+                    dm.params.constraint_penalty = 1.0
+                    dm.params.replacement_type = ''
+                    dm.params.convergence_type = False
+                    dm.params.num_generations = 10
+                    dm.params.percent_change = 0.1
+
+                elif dm.method == 'nl2sol':
+                    dm.type = 'lsq'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['least_squares_term']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.scaling = False
+                    dm.params.function_precision = 1.0e-10
+                    dm.params.absolute_conv_tol = -1.
+                    dm.params.x_conv_tol = -1.
+                    dm.params.singular_conv_tol = -1.
+                    dm.params.singular_radius = -1.
+                    dm.params.False_conv_tol = -1.
+                    dm.params.initial_trust_radius = -1.
+                    dm.params.covariance = 0
+                    dm.params.regression_stressbalances = False
+
+                elif dm.method == 'nlssol_sqp':
+                    dm.type = 'lsq'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['least_squares_term',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.constraint_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+                    dm.params.verify_level = -1
+                    dm.params.function_precision = 1.0e-10
+                    dm.params.linesearch_tolerance = 0.9
+
+                elif dm.method == 'optpp_g_newton':
+                    dm.type = 'lsq'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = ['linear_inequality_constraint',
+                                 'linear_equality_constraint']
+                    dm.responses = ['least_squares_term',
+                                    'nonlinear_inequality_constraint',
+                                    'nonlinear_equality_constraint']
+                    dm.ghspec = ['grad']
+                    dm.params.max_iterations = False
+                    dm.params.max_function_evaluations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.output = False
+                    dm.params.speculative = False
+                    dm.params.scaling = False
+                    dm.params.value_based_line_search = False
+                    dm.params.gradient_based_line_search = False
+                    dm.params.trust_region = False
+                    dm.params.tr_pds = False
+                    dm.params.max_step = 1000.
+                    dm.params.gradient_tolerance = 1.0e-4
+                    dm.params.merit_function = 'argaez_tapia'
+                    dm.params.central_path = dm.params.merit_function
+                    dm.params.steplength_to_boundary = 0.99995
+                    dm.params.centering_parameter = 0.2
+
+                elif dm.method == 'nond_sampling':
+                    dm.type = 'nond'
+                    dm.variables = ['normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['response_function']
+                    dm.ghspec = []
+    #                               not documented, but apparently works
+                    dm.params.output = False
+                    dm.params.seed = False
+                    dm.params.fixed_seed = False
+                    dm.params.rng = False
+                    dm.params.samples = False
+                    dm.params.sample_type = 'lhs'
+                    dm.params.all_variables = False
+                    dm.params.variance_based_decomp = False
+                    dm.params.previous_samples = 0
+
+                elif dm.method == 'nond_local_reliability':
+                    dm.type = 'nond'
+                    dm.variables = ['normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['response_function']
+                    dm.ghspec = ['grad']
+    #                               not documented, but may work
+                    dm.params.output = False
+                    dm.params.max_iterations = False
+                    dm.params.convergence_tolerance = False
+                    dm.params.mpp_search = False
+                    dm.params.sqp = False
+                    dm.params.nip = False
+                    dm.params.integration = 'first_order'
+                    dm.params.refinement = False
+                    dm.params.samples = 0
+                    dm.params.seed = False
+
+                elif dm.method == 'nond_global_reliability':
+                    dm.type = 'nond'
+                    dm.variables = ['normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['response_function']
+                    dm.ghspec = ['grad']
+    #                               not documented, but may work
+                    dm.params.output = False
+                    dm.params.x_gaussian_process = False
+                    dm.params.u_gaussian_process = False
+                    dm.params.all_variables = False
+                    dm.params.seed = False
+
+                elif dm.method == 'nond_polynomial_chaos':
+                    dm.type = 'nond'
+                    dm.variables = ['normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['response_function']
+                    dm.ghspec = ['grad']
+    #                               not documented, but may work
+                    dm.params.output = False
+                    dm.params.expansion_order = []
+                    dm.params.expansion_terms = []
+                    dm.params.quadrature_order = []
+                    dm.params.sparse_grid_level = []
+                    dm.params.expansion_samples = []
+                    dm.params.incremental_lhs = False
+                    dm.params.collocation_points = []
+                    dm.params.collocation_ratio = []
+                    dm.params.reuse_samples = False
+                    dm.params.expansion_import_file = ''
+                    dm.params.seed = False
+                    dm.params.fixed_seed = False
+                    dm.params.samples = 0
+                    dm.params.sample_type = 'lhs'
+                    dm.params.all_variables = False
+
+                elif dm.method == 'nond_stoch_collocation':
+                    dm.type = 'nond'
+                    dm.variables = ['normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['response_function']
+                    dm.ghspec = ['grad']
+    #                               not documented, but may work
+                    dm.params.output = False
+                    dm.params.quadrature_order = []
+                    dm.params.sparse_grid_level = []
+                    dm.params.seed = False
+                    dm.params.fixed_seed = False
+                    dm.params.samples = 0
+                    dm.params.sample_type = 'lhs'
+                    dm.params.all_variables = False
+
+                elif dm.method == 'nond_evidence':
+                    dm.type = 'nond'
+                    dm.variables = ['normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['response_function']
+                    dm.ghspec = ['grad']
+    #                               not documented, but may work
+                    dm.params.output = False
+                    dm.params.seed = False
+                    dm.params.samples = 10000
+
+                elif dm.method == 'dace':
+                    dm.type = 'dace'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'response_function']
+                    dm.ghspec = []
+                    dm.params.grid = False
+                    dm.params.random = False
+                    dm.params.oas = False
+                    dm.params.lhs = False
+                    dm.params.oa_lhs = False
+                    dm.params.box_behnken = False
+                    dm.params.central_composite = False
+                    dm.params.seed = False
+                    dm.params.fixed_seed = False
+                    dm.params.samples = False
+                    dm.params.symbols = False
+                    dm.params.quality_metrics = False
+                    dm.params.variance_based_decomp = False
+
+                elif dm.method == 'fsu_quasi_mc':
+                    dm.type = 'dace'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'response_function']
+                    dm.ghspec = []
+                    dm.params.halton = False
+                    dm.params.hammersley = False
+                    dm.params.samples = 0
+                    dm.params.sequence_start = [0]
+                    dm.params.sequence_leap = [1]
+                    dm.params.prime_base = False
+                    dm.params.fixed_sequence = False
+                    dm.params.latinize = False
+                    dm.params.variance_based_decomp = False
+                    dm.params.quality_metrics = False
+
+                elif dm.method == 'fsu_cvt':
+                    dm.type = 'dace'
+                    dm.variables = ['continuous_design',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'response_function']
+                    dm.ghspec = []
+                    dm.params.seed = False
+                    dm.params.fixed_seed = False
+                    dm.params.samples = 0
+                    dm.params.num_trials = 10000
+                    dm.params.trial_type = 'random'
+                    dm.params.latinize = False
+                    dm.params.variance_based_decomp = False
+                    dm.params.quality_metrics = False
+
+                elif dm.method == 'vector_parameter_study':
+                    dm.type = 'param'
+                    dm.variables = ['continuous_design',
+                                    'normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'response_function']
+                    dm.ghspec = []
+                    dm.params.output = False
+                    dm.params.final_point = []
+                    dm.params.step_length = []
+                    dm.params.num_steps = []
+                    dm.params.step_vector = []
+                    dm.params.num_steps = []
+
+                elif dm.method == 'list_parameter_study':
+                    dm.type = 'param'
+                    dm.variables = ['continuous_design',
+                                    'normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'response_function']
+                    dm.ghspec = []
+                    dm.params.output = False
+                    dm.params.list_of_points = []
+
+                elif dm.method == 'centered_parameter_study':
+                    dm.type = 'param'
+                    dm.variables = ['continuous_design',
+                                    'normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'response_function']
+                    dm.ghspec = []
+                    dm.params.output = False
+                    dm.params.percent_delta = []
+                    dm.params.deltas_per_variable = []
+
+                elif dm.method == 'multidim_parameter_study':
+                    dm.type = 'param'
+                    dm.variables = ['continuous_design',
+                                    'normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'response_function']
+                    dm.ghspec = []
+                    dm.params.output = False
+                    dm.params.partitions = []
+
+                elif dm.method == 'bayes_calibration':
+                    dm.type = 'bayes'
+                    dm.variables = ['continuous_design',
+                                    'normal_uncertain',
+                                    'uniform_uncertain',
+                                    'continuous_state']
+                    dm.lcspec = []
+                    dm.responses = ['objective_function',
+                                    'response_function',
+                                    'calibration_function']
+                    dm.ghspec = []
+                    dm.params.queso = False
+                    dm.params.dream = False
+                    dm.params.gpmsa = False
+                    dm.params.samples = 0
+                    dm.params.seed = False
+                    dm.params.output = False
+                    dm.params.metropolis_hastings = False
+                    dm.params.proposal_covariance = False
+                    dm.params.diagonal = False
+                    dm.params.values = []
+
+                else:
+                    raise RuntimeError('Unimplemented method: {}.'.format(dm.method))
+
+    #  if more than one argument, issue warning
+        else:
+            print('Warning: dakota_method:extra_arg: Extra arguments for object of class ' + str(type(dm)) + '.')
+        return dm
+
+    def __repr__(dm):
+
+        #  display the object
+        string = '\nclass dakota_method object = \n'
+        string += '       method: ' + str(dm.method) + '\n'
+        string += '         type: ' + str(dm.type) + '\n'
+        string += '    variables: ' + str(dm.variables) + '\n'
+        string += '       lcspec: ' + str(dm.lcspec) + '\n'
+        string += '    responses: ' + str(dm.responses) + '\n'
+        string += '       ghspec: ' + str(dm.ghspec) + '\n'
+
+    #  display the parameters within the object
+
+        fnames = fieldnames(dm.params)
+    #get rid of stuff we aren't using
+        try:
+            fnames.remove('__module__')
+        except ValueError:
+            pass
+
+        maxlen = 0
+        for i in range(len(fnames)):
+            maxlen = max(maxlen, len(fnames[i]))
+
+        for i in fnames:
+            string += '       params.{:{space}s}: {}\n'.format(str(i), str(dm.params.__dict__[i]), space=maxlen + 1)
+    #params.x   : y
+    #with maxlen + 1 spaces between x and :
+        return string
Index: /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_merge.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_merge.m	(revision 24686)
+++ /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_merge.m	(revision 24686)
@@ -0,0 +1,27 @@
+%
+%  merge a structure of parameters into a dakota_method object.
+%
+%  [dm]=dmeth_params_merge(dm,params)
+%
+%  every field of params that also exists in dm.params overwrites it;
+%  unrecognized fields trigger a warning and are ignored.
+%
+function [dm]=dmeth_params_merge(dm,params)
+
+%  refuse anything that is not a dakota_method object
+if ~isa(dm,'dakota_method')
+    error('Object ''%s'' is a ''%s'' class object, not ''%s''.',...
+        inputname(1),class(dm),'dakota_method');
+end
+
+%  copy over each recognized parameter, one field at a time
+pnames=fieldnames(params);
+
+for ip=1:numel(pnames)
+    if isfield(dm.params,pnames{ip})
+        dm.params.(pnames{ip})=params.(pnames{ip});
+    else
+        warning('dmeth_params_merge:unknown_param',...
+            'No parameter ''%s'' for dakota_method ''%s''.',...
+            pnames{ip},dm.method);
+    end
+end
+
+end
Index: /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_set.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_set.m	(revision 24686)
+++ /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_set.m	(revision 24686)
@@ -0,0 +1,25 @@
+%
+%  set parameters of a dakota_method object.
+%
+%  [dm]=dmeth_params_set(dm,varargin)
+%
+%  varargin is a flat name/value list; each name must already be a
+%  field of dm.params, otherwise a warning is issued and the pair is
+%  skipped.
+%
+function [dm]=dmeth_params_set(dm,varargin)
+
+%  refuse anything that is not a dakota_method object
+if ~isa(dm,'dakota_method')
+    error('Object ''%s'' is a ''%s'' class object, not ''%s''.',...
+        inputname(1),class(dm),'dakota_method');
+end
+
+%  walk the name/value pairs (names at odd positions, values right after)
+for iarg=1:2:numel(varargin)
+    if isfield(dm.params,varargin{iarg})
+        dm.params.(varargin{iarg})=varargin{iarg+1};
+    else
+        warning('dmeth_params_set:unknown_param',...
+            'No parameter ''%s'' for dakota_method ''%s''.',...
+            varargin{iarg},dm.method);
+    end
+end
+
+end
Index: /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_set.py
===================================================================
--- /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_set.py	(revision 24686)
+++ /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_set.py	(revision 24686)
@@ -0,0 +1,24 @@
+from helpers import *
+from dakota_method import *
+
+
+def dmeth_params_set(dm, *args):
+    #
+    #  set parameters of a dakota_method object.
+    #
+    #  dm = dmeth_params_set(dm, *args)
+    #
+
+    if not isinstance(dm, dakota_method):
+        raise RuntimeError('Provided object is a \'' + str(type(dm)) + '\' class object, not \'dakota_method\'')
+
+    #  loop through each parameter field in the input list
+    for i in range(0, len(args), 2):
+        if isfield(dm.params, args[i]):
+            #vars(dresp)[fnames[i]]
+            exec(('dm.params.%s = args[i + 1]') % (args[i]))
+    #vars(dm.params)[args[i]] = args[i + 1]
+        else:
+            print('WARNING: dmeth_params_set:unknown_param No parameter \'' + str(args[i]) + '\' for dakota_method \'' + str(dm.method) + '\'.')
+
+    return dm
Index: /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_write.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_write.m	(revision 24686)
+++ /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_write.m	(revision 24686)
@@ -0,0 +1,603 @@
+function dmeth_params_write(dm,fid,sbeg)
+%DMETH_PARAMS_WRITE - write the parameters from a dakota_method object
+%
+%   Usage:
+%      dmeth_params_write(dm,fid,sbeg)
+%
+
+if ~isa(dm,'dakota_method')
+    error('Object ''%s'' is a ''%s'' class object, not ''%s''.',...
+        inputname(1),class(dm),'dakota_method');
+end
+
+if ~exist('sbeg','var')
+    sbeg='\t  ';
+end
+
+%  perform some error checking, but leave the rest to dakota.
+%  unfortunately this prevents merely looping through the fields
+%  of the parameters structure.
+
+%  write method-independent controls
+
+% param_write(fid,sbeg,'id_method','                = ','\n',dm.params);
+% param_write(fid,sbeg,'model_pointer','            = ','\n',dm.params);
+
+%  write method-dependent controls
+
+switch dm.type
+    case {'dot'}
+        param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+        param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+        param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+        param_write(fid,sbeg,'constraint_tolerance','     = ','\n',dm.params);
+        param_write(fid,sbeg,'output',' ','\n',dm.params);
+        param_write(fid,sbeg,'speculative','','\n',dm.params);
+        param_write(fid,sbeg,'scaling','','\n',dm.params);
+        switch dm.method
+            case{'dot_bfgs',...
+                 'dot_frcg',...
+                 'dot_mmfd',...
+                 'dot_slp',...
+                 'dot_sqp'}
+                param_write(fid,sbeg,'optimization_type',' = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'npsol'}
+        param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+        param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+        param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+        param_write(fid,sbeg,'constraint_tolerance','     = ','\n',dm.params);
+        param_write(fid,sbeg,'output',' ','\n',dm.params);
+        param_write(fid,sbeg,'speculative','','\n',dm.params);
+        param_write(fid,sbeg,'scaling','','\n',dm.params);
+        switch dm.method
+            case {'npsol_sqp'}
+                param_write(fid,sbeg,'verify_level','         = ','\n',dm.params);
+                param_write(fid,sbeg,'function_precision','   = ','\n',dm.params);
+                param_write(fid,sbeg,'linesearch_tolerance',' = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'conmin'}
+        param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+        param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+        param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+        param_write(fid,sbeg,'constraint_tolerance','     = ','\n',dm.params);
+        param_write(fid,sbeg,'output',' ','\n',dm.params);
+        param_write(fid,sbeg,'speculative','','\n',dm.params);
+        param_write(fid,sbeg,'scaling','','\n',dm.params);
+        switch dm.method
+            case {'conmin_frcg',...
+                  'conmin_mfd'}
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'optpp'}
+        param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+        param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+        param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+        param_write(fid,sbeg,'output',' ','\n',dm.params);
+        param_write(fid,sbeg,'speculative','','\n',dm.params);
+        param_write(fid,sbeg,'scaling','','\n',dm.params);
+        switch dm.method
+            case {'optpp_cg'}
+                param_write(fid,sbeg,'max_step','           = ','\n',dm.params);
+                param_write(fid,sbeg,'gradient_tolerance',' = ','\n',dm.params);
+
+            case {'optpp_q_newton',...
+                  'optpp_fd_newton',...
+                  'optpp_newton'}
+                if (dm.params.value_based_line_search + ...
+                    dm.params.gradient_based_line_search + ...
+                    dm.params.trust_region + ...
+                    dm.params.tr_pds > 1)
+                    error('''%s'' method must have only one algorithm.',...
+                        dm.method);
+                end
+                param_write(fid,sbeg,'value_based_line_search','','\n',dm.params);
+                param_write(fid,sbeg,'gradient_based_line_search','','\n',dm.params);
+                param_write(fid,sbeg,'trust_region','','\n',dm.params);
+                param_write(fid,sbeg,'tr_pds','','\n',dm.params);
+                param_write(fid,sbeg,'max_step','               = ','\n',dm.params);
+                param_write(fid,sbeg,'gradient_tolerance','     = ','\n',dm.params);
+                param_write(fid,sbeg,'merit_function','         = ','\n',dm.params);
+                param_write(fid,sbeg,'central_path','           = ','\n',dm.params);
+                param_write(fid,sbeg,'steplength_to_boundary',' = ','\n',dm.params);
+                param_write(fid,sbeg,'centering_parameter','    = ','\n',dm.params);
+
+            case {'optpp_pds'}
+                param_write(fid,sbeg,'search_scheme_size',' = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'apps'}
+        param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+        param_write(fid,sbeg,'constraint_tolerance','     = ','\n',dm.params);
+        param_write(fid,sbeg,'output',' ','\n',dm.params);
+        param_write(fid,sbeg,'scaling','','\n',dm.params);
+        switch dm.method
+            case {'asynch_pattern_search'}
+                param_write(fid,sbeg,'initial_delta','      = ','\n',dm.params);
+                param_write(fid,sbeg,'threshold_delta','    = ','\n',dm.params);
+                param_write(fid,sbeg,'contraction_factor',' = ','\n',dm.params);
+                param_write(fid,sbeg,'solution_target','    = ','\n',dm.params);
+                param_write(fid,sbeg,'synchronization','    = ','\n',dm.params);
+                param_write(fid,sbeg,'merit_function','     = ','\n',dm.params);
+                param_write(fid,sbeg,'constraint_penalty',' = ','\n',dm.params);
+                param_write(fid,sbeg,'smoothing_factor','   = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'coliny'}
+        param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+        param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+        param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+        param_write(fid,sbeg,'output',' ','\n',dm.params);
+        param_write(fid,sbeg,'scaling','','\n',dm.params);
+
+        param_write(fid,sbeg,'show_misc_options','','\n',dm.params);
+        param_write(fid,sbeg,'misc_options','      = ','\n',dm.params);
+        param_write(fid,sbeg,'solution_accuracy',' = ','\n',dm.params);
+        switch dm.method
+            case {'coliny_cobyla'}
+                param_write(fid,sbeg,'initial_delta','   = ','\n',dm.params);
+                param_write(fid,sbeg,'threshold_delta',' = ','\n',dm.params);
+
+            case {'coliny_direct'}
+                param_write(fid,sbeg,'division','                 = ','\n',dm.params);
+                param_write(fid,sbeg,'global_balance_parameter',' = ','\n',dm.params);
+                param_write(fid,sbeg,'local_balance_parameter','  = ','\n',dm.params);
+                param_write(fid,sbeg,'max_boxsize_limit','        = ','\n',dm.params);
+                param_write(fid,sbeg,'min_boxsize_limit','        = ','\n',dm.params);
+                param_write(fid,sbeg,'constraint_penalty','       = ','\n',dm.params);
+
+            case {'coliny_ea'}
+                param_write(fid,sbeg,'seed','                    = ','\n',dm.params);
+                param_write(fid,sbeg,'population_size','         = ','\n',dm.params);
+                param_write(fid,sbeg,'initialization_type','     = ','\n',dm.params);
+                param_write(fid,sbeg,'fitness_type','            = ','\n',dm.params);
+                param_write(fid,sbeg,'replacement_type','        = ','\n',dm.params);
+                param_write(fid,sbeg,'random','                  = ','\n',dm.params);
+                param_write(fid,sbeg,'chc','                     = ','\n',dm.params);
+                param_write(fid,sbeg,'elitist','                 = ','\n',dm.params);
+                param_write(fid,sbeg,'new_solutions_generated',' = ','\n',dm.params);
+                param_write(fid,sbeg,'crossover_type','          = ','\n',dm.params);
+                param_write(fid,sbeg,'crossover_rate','          = ','\n',dm.params);
+                param_write(fid,sbeg,'mutation_type','           = ','\n',dm.params);
+                param_write(fid,sbeg,'mutation_scale','          = ','\n',dm.params);
+                param_write(fid,sbeg,'mutation_range','          = ','\n',dm.params);
+                param_write(fid,sbeg,'dimension_ratio','         = ','\n',dm.params);
+                param_write(fid,sbeg,'mutation_rate','           = ','\n',dm.params);
+                param_write(fid,sbeg,'non_adaptive','','\n',dm.params);
+
+            case {'coliny_pattern_search'}
+                param_write(fid,sbeg,'stochastic','','\n',dm.params);
+                param_write(fid,sbeg,'seed','                 = ','\n',dm.params);
+                param_write(fid,sbeg,'initial_delta','        = ','\n',dm.params);
+                param_write(fid,sbeg,'threshold_delta','      = ','\n',dm.params);
+                param_write(fid,sbeg,'constraint_penalty','   = ','\n',dm.params);
+                param_write(fid,sbeg,'constant_penalty','','\n',dm.params);
+                param_write(fid,sbeg,'pattern_basis','        = ','\n',dm.params);
+                param_write(fid,sbeg,'total_pattern_size','   = ','\n',dm.params);
+                param_write(fid,sbeg,'no_expansion','','\n',dm.params);
+                param_write(fid,sbeg,'expand_after_success',' = ','\n',dm.params);
+                param_write(fid,sbeg,'contraction_factor','   = ','\n',dm.params);
+                param_write(fid,sbeg,'synchronization','      = ','\n',dm.params);
+                param_write(fid,sbeg,'exploratory_moves','    = ','\n',dm.params);
+
+            case {'coliny_solis_wets'}
+                param_write(fid,sbeg,'seed','                   = ','\n',dm.params);
+                param_write(fid,sbeg,'initial_delta','          = ','\n',dm.params);
+                param_write(fid,sbeg,'threshold_delta','        = ','\n',dm.params);
+                param_write(fid,sbeg,'no_expansion','','\n',dm.params);
+                param_write(fid,sbeg,'expand_after_success','   = ','\n',dm.params);
+                param_write(fid,sbeg,'contract_after_failure',' = ','\n',dm.params);
+                param_write(fid,sbeg,'contraction_factor','     = ','\n',dm.params);
+                param_write(fid,sbeg,'constraint_penalty','     = ','\n',dm.params);
+                param_write(fid,sbeg,'constant_penalty','','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'ncsu'}
+        param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+        param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+        param_write(fid,sbeg,'scaling','','\n',dm.params);
+        switch dm.method
+            case {'ncsu_direct'}
+                param_write(fid,sbeg,'solution_accuracy',' = ','\n',dm.params);
+                param_write(fid,sbeg,'min_boxsize_limit',' = ','\n',dm.params);
+                param_write(fid,sbeg,'vol_boxsize_limit',' = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'jega'}
+        param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+        param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+        param_write(fid,sbeg,'output',' ','\n',dm.params);
+        param_write(fid,sbeg,'scaling','','\n',dm.params);
+
+        param_write(fid,sbeg,'seed','                             = ','\n',dm.params);
+        param_write(fid,sbeg,'log_file','                         = ','\n',dm.params);
+        param_write(fid,sbeg,'population_size','                  = ','\n',dm.params);
+        param_write(fid,sbeg,'print_each_pop','','\n',dm.params);
+        param_write(fid,sbeg,'output','                           = ','\n',dm.params);
+        param_write(fid,sbeg,'initialization_type','              = ','\n',dm.params);
+        param_write(fid,sbeg,'mutation_type','                    = ','\n',dm.params);
+        param_write(fid,sbeg,'mutation_scale','                   = ','\n',dm.params);
+        param_write(fid,sbeg,'mutation_rate','                    = ','\n',dm.params);
+        param_write(fid,sbeg,'replacement_type','                 = ','\n',dm.params);
+        param_write(fid,sbeg,'below_limit','                      = ','\n',dm.params);
+        param_write(fid,sbeg,'shrinkage_percentage','             = ','\n',dm.params);
+        param_write(fid,sbeg,'crossover_type','                   = ','\n',dm.params);
+        param_write(fid,sbeg,'multi_point_binary','               = ','\n',dm.params);
+        param_write(fid,sbeg,'multi_point_parameterized_binary',' = ','\n',dm.params);
+        param_write(fid,sbeg,'multi_point_real','                 = ','\n',dm.params);
+        param_write(fid,sbeg,'shuffle_random','                   = ','\n',dm.params);
+        param_write(fid,sbeg,'num_parents','                      = ','\n',dm.params);
+        param_write(fid,sbeg,'num_offspring','                    = ','\n',dm.params);
+        param_write(fid,sbeg,'crossover_rate','                   = ','\n',dm.params);
+
+        switch dm.method
+            case {'moga'}
+                param_write(fid,sbeg,'fitness_type','        = ','\n',dm.params);
+                param_write(fid,sbeg,'niching_type','        = ','\n',dm.params);
+                if ~isempty(dm.params.radial) && ...
+                   ~isempty(dm.params.distance)
+                    error('''%s'' method must have only one niching distance.',...
+                        dm.method);
+                end
+                param_write(fid,sbeg,'radial','              = ','\n',dm.params);
+                param_write(fid,sbeg,'distance','            = ','\n',dm.params);
+                param_write(fid,sbeg,'metric_tracker','','\n',dm.params);
+                param_write(fid,sbeg,'percent_change','      = ','\n',dm.params);
+                param_write(fid,sbeg,'num_generations','     = ','\n',dm.params);
+                param_write(fid,sbeg,'postprocessor_type','  = ','\n',dm.params);
+                param_write(fid,sbeg,'orthogonal_distance',' = ','\n',dm.params);
+
+            case {'soga'}
+                param_write(fid,sbeg,'fitness_type','       = ','\n',dm.params);
+                param_write(fid,sbeg,'constraint_penalty',' = ','\n',dm.params);
+                param_write(fid,sbeg,'replacement_type','   = ','\n',dm.params);
+                param_write(fid,sbeg,'convergence_type','   = ','\n',dm.params);
+                param_write(fid,sbeg,'num_generations','    = ','\n',dm.params);
+                param_write(fid,sbeg,'percent_change','     = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'lsq'}
+        switch dm.method
+            case {'nl2sol'}
+                param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+                param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+                param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+                param_write(fid,sbeg,'output',' ','\n',dm.params);
+                param_write(fid,sbeg,'scaling','','\n',dm.params);
+
+                param_write(fid,sbeg,'function_precision','   = ','\n',dm.params);
+                param_write(fid,sbeg,'absolute_conv_tol','    = ','\n',dm.params);
+                param_write(fid,sbeg,'x_conv_tol','           = ','\n',dm.params);
+                param_write(fid,sbeg,'singular_conv_tol','    = ','\n',dm.params);
+                param_write(fid,sbeg,'singular_radius','      = ','\n',dm.params);
+                param_write(fid,sbeg,'false_conv_tol','       = ','\n',dm.params);
+                param_write(fid,sbeg,'initial_trust_radius',' = ','\n',dm.params);
+                param_write(fid,sbeg,'covariance','           = ','\n',dm.params);
+                param_write(fid,sbeg,'regression_stressbalances','','\n',dm.params);
+
+            case {'nlssol_sqp'}
+                param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+                param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+                param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+                param_write(fid,sbeg,'constraint_tolerance','     = ','\n',dm.params);
+                param_write(fid,sbeg,'output',' ','\n',dm.params);
+                param_write(fid,sbeg,'speculative','','\n',dm.params);
+                param_write(fid,sbeg,'scaling','','\n',dm.params);
+
+                param_write(fid,sbeg,'verify_level','         = ','\n',dm.params);
+                param_write(fid,sbeg,'function_precision','   = ','\n',dm.params);
+                param_write(fid,sbeg,'linesearch_tolerance',' = ','\n',dm.params);
+
+            case {'optpp_g_newton'}
+                param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+                param_write(fid,sbeg,'max_function_evaluations',' = ','\n',dm.params);
+                param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+                param_write(fid,sbeg,'output',' ','\n',dm.params);
+                param_write(fid,sbeg,'speculative','','\n',dm.params);
+                param_write(fid,sbeg,'scaling','','\n',dm.params);
+
+                if (dm.params.value_based_line_search + ...
+                    dm.params.gradient_based_line_search + ...
+                    dm.params.trust_region + ...
+                    dm.params.tr_pds > 1)
+                    error('''%s'' method must have only one algorithm.',...
+                        dm.method);
+                end
+                param_write(fid,sbeg,'value_based_line_search','','\n',dm.params);
+                param_write(fid,sbeg,'gradient_based_line_search','','\n',dm.params);
+                param_write(fid,sbeg,'trust_region','','\n',dm.params);
+                param_write(fid,sbeg,'tr_pds','','\n',dm.params);
+                param_write(fid,sbeg,'max_step','               = ','\n',dm.params);
+                param_write(fid,sbeg,'gradient_tolerance','     = ','\n',dm.params);
+                param_write(fid,sbeg,'merit_function','         = ','\n',dm.params);
+                param_write(fid,sbeg,'central_path','           = ','\n',dm.params);
+                param_write(fid,sbeg,'steplength_to_boundary',' = ','\n',dm.params);
+                param_write(fid,sbeg,'centering_parameter','    = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'nond'}
+        switch dm.method
+            case {'nond_sampling'}
+                param_write(fid,sbeg,'seed','             = ','\n',dm.params);
+                param_write(fid,sbeg,'fixed_seed','','\n',dm.params);
+                dver=textscan(IssmConfig('_DAKOTA_VERSION_'),'%[0123456789].%[0123456789].%[0123456789]');
+                if ((str2num(dver{1}{1})==4 && str2num(dver{2}{1})>2) || str2num(dver{1}{1})>4)
+                    param_write(fid,sbeg,'rng','                ','\n',dm.params);
+                end
+                param_write(fid,sbeg,'samples','          = ','\n',dm.params);
+                param_write(fid,sbeg,'sample_type','        ','\n',dm.params);
+                param_write(fid,sbeg,'all_variables','','\n',dm.params);
+                param_write(fid,sbeg,'variance_based_decomp','','\n',dm.params);
+                if strcmp(dm.params.sample_type,'incremental_random') || ...
+                   strcmp(dm.params.sample_type,'incremental_lhs'   )
+                    param_write(fid,sbeg,'previous_samples',' = ','\n',dm.params);
+                end
+                param_write(fid,sbeg,'output',' ','\n',dm.params);
+
+            case {'nond_local_reliability'}
+                param_write(fid,sbeg,'max_iterations','           = ','\n',dm.params);
+                param_write(fid,sbeg,'convergence_tolerance','    = ','\n',dm.params);
+
+                param_write(fid,sbeg,'mpp_search','  = ','\n',dm.params);
+                if ischar(dm.params.mpp_search)
+                    if (dm.params.sqp + ...
+                        dm.params.nip > 1)
+                        error('''%s'' method must have only one algorithm.',...
+                            dm.method);
+                    end
+                    param_write(fid,sbeg,'sqp','','\n',dm.params);
+                    param_write(fid,sbeg,'nip','','\n',dm.params);
+                    param_write(fid,sbeg,'integration','   ','\n',dm.params);
+                    param_write(fid,sbeg,'refinement','  = ','\n',dm.params);
+                    if ischar(dm.params.refinement)
+                        param_write(fid,sbeg,'samples','     = ','\n',dm.params);
+                        param_write(fid,sbeg,'seed','        = ','\n',dm.params);
+                    end
+                end
+                param_write(fid,sbeg,'output',' ','\n',dm.params);
+
+            case {'nond_global_reliability'}
+                if (dm.params.x_gaussian_process + ...
+                    dm.params.u_gaussian_process ~= 1)
+                    error('''%s'' method must have one and only one algorithm.',...
+                        dm.method);
+                end
+                param_write(fid,sbeg,'x_gaussian_process','','\n',dm.params);
+                param_write(fid,sbeg,'u_gaussian_process','','\n',dm.params);
+                param_write(fid,sbeg,'all_variables','','\n',dm.params);
+                param_write(fid,sbeg,'seed',' = ','\n',dm.params);
+
+            case {'nond_polynomial_chaos'}
+                param_write(fid,sbeg,'expansion_order','       = ','\n',dm.params);
+                param_write(fid,sbeg,'expansion_terms','       = ','\n',dm.params);
+                param_write(fid,sbeg,'quadrature_order','      = ','\n',dm.params);
+                param_write(fid,sbeg,'sparse_grid_level','     = ','\n',dm.params);
+                param_write(fid,sbeg,'expansion_samples','     = ','\n',dm.params);
+                param_write(fid,sbeg,'incremental_lhs','','\n',dm.params);
+                param_write(fid,sbeg,'collocation_points','    = ','\n',dm.params);
+                param_write(fid,sbeg,'collocation_ratio','     = ','\n',dm.params);
+                param_write(fid,sbeg,'reuse_samples','','\n',dm.params);
+                param_write(fid,sbeg,'expansion_import_file',' = ','\n',dm.params);
+                param_write(fid,sbeg,'seed','                  = ','\n',dm.params);
+                param_write(fid,sbeg,'fixed_seed','','\n',dm.params);
+                param_write(fid,sbeg,'samples','               = ','\n',dm.params);
+                param_write(fid,sbeg,'sample_type','           = ','\n',dm.params);
+                param_write(fid,sbeg,'all_variables','','\n',dm.params);
+
+            case {'nond_stoch_collocation'}
+                param_write(fid,sbeg,'quadrature_order','  = ','\n',dm.params);
+                param_write(fid,sbeg,'sparse_grid_level',' = ','\n',dm.params);
+                param_write(fid,sbeg,'seed','              = ','\n',dm.params);
+                param_write(fid,sbeg,'fixed_seed','','\n',dm.params);
+                param_write(fid,sbeg,'samples','           = ','\n',dm.params);
+                param_write(fid,sbeg,'sample_type','       = ','\n',dm.params);
+                param_write(fid,sbeg,'all_variables','','\n',dm.params);
+
+            case {'nond_evidence'}
+                param_write(fid,sbeg,'seed','    = ','\n',dm.params);
+                param_write(fid,sbeg,'samples',' = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'dace'}
+        switch dm.method
+            case {'dace'}
+                if (dm.params.grid + ...
+                    dm.params.random + ...
+                    dm.params.oas + ...
+                    dm.params.lhs + ...
+                    dm.params.oa_lhs + ...
+                    dm.params.box_behnken + ...
+                    dm.params.central_composite ~= 1)
+                    error('''%s'' method must have one and only one algorithm.',...
+                        dm.method);
+                end
+                param_write(fid,sbeg,'grid','','\n',dm.params);
+                param_write(fid,sbeg,'random','','\n',dm.params);
+                param_write(fid,sbeg,'oas','','\n',dm.params);
+                param_write(fid,sbeg,'lhs','','\n',dm.params);
+                param_write(fid,sbeg,'oa_lhs','','\n',dm.params);
+                param_write(fid,sbeg,'box_behnken','','\n',dm.params);
+                param_write(fid,sbeg,'central_composite','','\n',dm.params);
+                param_write(fid,sbeg,'seed','    = ','\n',dm.params);
+                param_write(fid,sbeg,'fixed_seed','','\n',dm.params);
+                param_write(fid,sbeg,'samples',' = ','\n',dm.params);
+                param_write(fid,sbeg,'symbols',' = ','\n',dm.params);
+                param_write(fid,sbeg,'quality_metrics','','\n',dm.params);
+                param_write(fid,sbeg,'variance_based_decomp','','\n',dm.params);
+
+            case {'fsu_quasi_mc'}
+                if (dm.params.halton + ...
+                    dm.params.hammersley ~= 1)
+                    error('''%s'' method must have one and only one sequence type.',...
+                        dm.method);
+                end
+                param_write(fid,sbeg,'halton','','\n',dm.params);
+                param_write(fid,sbeg,'hammersley','','\n',dm.params);
+                param_write(fid,sbeg,'samples','        = ','\n',dm.params);
+                param_write(fid,sbeg,'sequence_start',' = ','\n',dm.params);
+                param_write(fid,sbeg,'sequence_leap','  = ','\n',dm.params);
+                param_write(fid,sbeg,'prime_base','     = ','\n',dm.params);
+                param_write(fid,sbeg,'fixed_sequence','','\n',dm.params);
+                param_write(fid,sbeg,'latinize','','\n',dm.params);
+                param_write(fid,sbeg,'variance_based_decomp','','\n',dm.params);
+                param_write(fid,sbeg,'quality_metrics','','\n',dm.params);
+
+            case {'fsu_cvt'}
+                param_write(fid,sbeg,'seed','       = ','\n',dm.params);
+                param_write(fid,sbeg,'fixed_seed','','\n',dm.params);
+                param_write(fid,sbeg,'samples','    = ','\n',dm.params);
+                param_write(fid,sbeg,'num_trials',' = ','\n',dm.params);
+                param_write(fid,sbeg,'trial_type',' = ','\n',dm.params);
+                param_write(fid,sbeg,'latinize','','\n',dm.params);
+                param_write(fid,sbeg,'variance_based_decomp','','\n',dm.params);
+                param_write(fid,sbeg,'quality_metrics','','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+    case {'param'}
+        param_write(fid,sbeg,'output',' ','\n',dm.params);
+        switch dm.method
+            case {'vector_parameter_study'}
+                if ~xor(isempty(dm.params.final_point), ...
+                        isempty(dm.params.step_vector))
+                    error('''%s'' method must have one and only one specification.',...
+                        dm.method);
+                end
+                if     ~isempty(dm.params.final_point)
+                    param_write(fid,sbeg,'final_point',' = ','\n',dm.params);
+                    param_write(fid,sbeg,'step_length',' = ','\n',dm.params);
+                    param_write(fid,sbeg,'num_steps','   = ','\n',dm.params);
+                elseif ~isempty(dm.params.step_vector)
+                    param_write(fid,sbeg,'step_vector',' = ','\n',dm.params);
+                    param_write(fid,sbeg,'num_steps','   = ','\n',dm.params);
+                end
+
+            case {'list_parameter_study'}
+                param_write(fid,sbeg,'list_of_points',' = ','\n',dm.params);
+
+            case {'centered_parameter_study'}
+                param_write(fid,sbeg,'percent_delta','       = ','\n',dm.params);
+                param_write(fid,sbeg,'deltas_per_variable',' = ','\n',dm.params);
+
+            case {'multidim_parameter_study'}
+                param_write(fid,sbeg,'partitions',' = ','\n',dm.params);
+
+            otherwise
+                error('Unrecognized ''%s'' method: ''%s''.',dm.type,dm.method);
+        end
+
+	case {'bayes'}
+		switch dm.method
+				case {'bayes_calibration'}
+               % if (dm.params.queso + ...
+                %    dm.params.dream + ...
+					%	 dm.params.gpmsa ~= 1)
+                %    error('''%s'' method must have one and only one bayes type.',...
+                 %       dm.method);
+               % end
+                param_write(fid,sbeg,'queso','','\n',dm.params);
+                param_write(fid,sbeg,'dream','','\n',dm.params);
+                param_write(fid,sbeg,'gpmsa','','\n',dm.params);
+                param_write(fid,sbeg,'samples','        = ','\n',dm.params);
+                param_write(fid,sbeg,'seed','      = ','\n',dm.params);
+					 param_write(fid,sbeg,'output','    =','\n',dm.params);
+					 param_write(fid,sbeg,'metropolis_hastings','','\n',dm.params);
+					 param_write(fid,sbeg,'proposal_covariance','','\n',dm.params);
+					 param_write(fid,sbeg,'diagonal','','\n',dm.params);
+					 param_write(fid,sbeg,'values','     = ','\n',dm.params);
+		end
+
+	case {'polynomial_chaos'}
+		switch dm.method
+				case {'polynomial_chaos'}
+					param_write(fid,sbeg,'sparse_grid_level',' = ','\n',dm.params);
+					fprintf(fid,'\t  dimension_adaptive p_refinement sobol\n');
+					fprintf(fid,'\t  \tmax_iterations  = 3\n');
+					fprintf(fid,'\t  \tconvergence_tol = 1.e-1\n');
+			end
+
+    otherwise
+        error('Unrecognized method type: ''%s''.',dm.type);
+end
+
+end
+
+function param_struc_write(fidi,sbeg,smid,send,params) % {{{
+	%%  function to write a structure of parameters
+	%
+	%  param_struc_write(fidi,sbeg,smid,send,params)
+	%
+	%  writes every field of the structure params to the file identified
+	%  by fidi, one field at a time, by delegating each field to
+	%  param_write (which also handles skipping/warning logic per field).
+	%    fidi   - file identifier open for writing
+	%    sbeg   - string emitted before each parameter name
+	%    smid   - string emitted between name and value (e.g. ' = ')
+	%    send   - string emitted after the value (e.g. '\n')
+	%    params - structure whose fields are the parameters to write
+
+	%  loop through each parameter field in the structure
+
+	fnames=fieldnames(params);
+
+	for i=1:numel(fnames)
+		param_write(fidi,sbeg,fnames{i},smid,send,params);
+	end
+
+end %}}}
+function param_write(fidi,sbeg,pname,smid,send,params) % {{{
+%%  function to write a parameter
+%
+%  param_write(fidi,sbeg,pname,smid,send,params)
+%
+%  writes the single field params.(pname) to file fidi as
+%  "sbeg pname smid value send", with formatting chosen by the field's
+%  class.  Guard order matters: a missing field warns and returns; a
+%  logical false returns silently (flag not set, nothing to emit); an
+%  empty value warns and returns.
+
+	%  check for errors
+
+	if ~isfield(params,pname)
+		% inputname(6) recovers the caller's variable name for 'params'
+		% so the warning names the offending structure
+		warning('param_write:param_not_found','Parameter ''%s'' not found in ''%s''.',pname,inputname(6));
+		return
+	elseif islogical(params.(pname)) && ~params.(pname)
+		% logical false: flag is off, so the keyword is simply omitted
+		return
+	elseif isempty(params.(pname))
+		warning('param_write:param_empty','Parameter ''%s'' requires input of type ''%s''.',...
+			pname,class(params.(pname)));
+		return
+	end
+
+	%  construct the parameter string based on type
+	if islogical(params.(pname))
+		% logical true: emit the bare keyword (no smid/value)
+		fprintf(fidi,[sbeg '%s' send],pname);
+	elseif isnumeric(params.(pname))
+		% numeric: first element after smid, remaining elements
+		% space-separated on the same line
+		fprintf(fidi,[sbeg '%s' smid '%g'],pname,params.(pname)(1));
+		for i=2:numel(params.(pname))
+			fprintf(fidi,[' %g'],params.(pname)(i));
+		end
+		fprintf(fidi,[send]);
+	elseif ischar   (params.(pname))
+		fprintf(fidi,[sbeg '%s' smid '%s' send],pname,params.(pname));
+	else
+		warning('param_write:param_unrecog','Parameter ''%s'' is of unrecognized type ''%s''.',pname,class(params.(pname)));
+		return
+	end
+
+end% }}}
Index: /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_write.py
===================================================================
--- /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_write.py	(revision 24686)
+++ /issm/trunk/src/m/classes/qmu/dakota_method/dmeth_params_write.py	(revision 24686)
@@ -0,0 +1,538 @@
+from dakota_method import *
+from MatlabFuncs import *
+from IssmConfig import *
+#move this later:
+from helpers import *
+
+
+def dmeth_params_write(dm, fid, sbeg='\t  '):
+    '''  write the parameters from a dakota_method object.
+    [] = dmeth_params_write(dm, fid, sbeg)
+    '''
+
+    if not isinstance(dm, dakota_method):
+        raise RuntimeError('Object ' + str(dm) + ' is a ' + type(dm) + ' class object, not < dakota_method > .')
+
+    if sbeg is None or sbeg == '':
+        sbeg = '\t  '
+
+    #  perform some error checking, but leave the rest to dakota.
+    #  unfortunately this prevents merely looping through the fields
+    #  of the parameters structure.
+
+    #  write method - independent controls
+
+    # param_write(fid, sbeg, 'id_method', ' = ', '\n', dm.params)
+    # param_write(fid, sbeg, 'model_pointer', ' = ', '\n', dm.params)
+
+    #  write method - dependent controls
+
+    #switch dm.type
+    if dm.type == 'dot':
+        param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'constraint_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+        param_write(fid, sbeg, 'speculative', '', '\n', dm.params)
+        param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+    #switch dm.method
+        if dm.method in ['dot_bfgs',
+                         'dot_frcg',
+                         'dot_mmfd',
+                         'dot_slp',
+                         'dot_sqp']:
+            param_write(fid, sbeg, 'optimization_type', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'npsol':
+        param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'constraint_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+        param_write(fid, sbeg, 'speculative', '', '\n', dm.params)
+        param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+    #switch dm.method
+        if dm.method == 'npsol_sqp':
+            param_write(fid, sbeg, 'verify_level', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'function_precision', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'linesearch_tolerance', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'conmin':
+        param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'constraint_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+        param_write(fid, sbeg, 'speculative', '', '\n', dm.params)
+        param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+    #switch dm.method
+        if dm.method in ['conmin_frcg', 'conmin_mfd']:
+            pass
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'optpp':
+        param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+        param_write(fid, sbeg, 'speculative', '', '\n', dm.params)
+        param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+    #switch dm.method
+        if dm.method == 'optpp_cg':
+            param_write(fid, sbeg, 'max_step', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'gradient_tolerance', ' = ', '\n', dm.params)
+
+        elif dm.method in ['optpp_q_newton', 'optpp_fd_newton', 'optpp_newton']:
+            if (dm.params.value_based_line_search + dm.params.gradient_based_line_search + dm.params.trust_region + dm.params.tr_pds > 1):
+                raise RuntimeError('  #s'' method must have only one algorithm.', dm.method)
+            param_write(fid, sbeg, 'value_based_line_search', '', '\n', dm.params)
+            param_write(fid, sbeg, 'gradient_based_line_search', '', '\n', dm.params)
+            param_write(fid, sbeg, 'trust_region', '', '\n', dm.params)
+            param_write(fid, sbeg, 'tr_pds', '', '\n', dm.params)
+            param_write(fid, sbeg, 'max_step', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'gradient_tolerance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'merit_function', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'central_path', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'steplength_to_boundary', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'centering_parameter', ' = ', '\n', dm.params)
+
+        elif dm.method == 'optpp_pds':
+            param_write(fid, sbeg, 'search_scheme_size', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'apps':
+        param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'constraint_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+        param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+    #switch dm.method
+        if dm.method == 'asynch_pattern_search':
+            param_write(fid, sbeg, 'initial_delta', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'threshold_delta', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'contraction_factor', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'solution_target', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'synchronization', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'merit_function', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'constraint_penalty', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'smoothing_factor', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'coliny':
+        param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+        param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+        param_write(fid, sbeg, 'show_misc_options', '', '\n', dm.params)
+        param_write(fid, sbeg, 'misc_options', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'solution_accuracy', ' = ', '\n', dm.params)
+    #switch dm.method
+        if dm.method == 'coliny_cobyla':
+            param_write(fid, sbeg, 'initial_delta', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'threshold_delta', ' = ', '\n', dm.params)
+
+        elif dm.method == 'coliny_direct':
+            param_write(fid, sbeg, 'division', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'global_balance_parameter', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'local_balance_parameter', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'max_boxsize_limit', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'min_boxsize_limit', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'constraint_penalty', ' = ', '\n', dm.params)
+
+        elif dm.method == 'coliny_ea':
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'population_size', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'initialization_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'fitness_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'replacement_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'random', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'chc', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'elitist', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'new_solutions_generated', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'crossover_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'crossover_rate', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'mutation_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'mutation_scale', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'mutation_range', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'dimension_ratio', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'mutation_rate', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'non_adaptive', '', '\n', dm.params)
+
+        elif dm.method == 'coliny_pattern_search':
+            param_write(fid, sbeg, 'stochastic', '', '\n', dm.params)
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'initial_delta', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'threshold_delta', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'constraint_penalty', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'constant_penalty', '', '\n', dm.params)
+            param_write(fid, sbeg, 'pattern_basis', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'total_pattern_size', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'no_expansion', '', '\n', dm.params)
+            param_write(fid, sbeg, 'expand_after_success', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'contraction_factor', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'synchronization', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'exploratory_moves', ' = ', '\n', dm.params)
+
+        elif dm.method == 'coliny_solis_wets':
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'initial_delta', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'threshold_delta', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'no_expansion', '', '\n', dm.params)
+            param_write(fid, sbeg, 'expand_after_success', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'contract_after_failure', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'contraction_factor', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'constraint_penalty', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'constant_penalty', '', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'ncsu':
+        param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+    #switch dm.method
+        if dm.method == 'ncsu_direct':
+            param_write(fid, sbeg, 'solution_accuracy', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'min_boxsize_limit', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'vol_boxsize_limit', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'jega':
+        param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+        param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+        param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'log_file', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'population_size', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'print_each_pop', '', '\n', dm.params)
+        param_write(fid, sbeg, 'output', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'initialization_type', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'mutation_type', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'mutation_scale', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'mutation_rate', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'replacement_type', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'below_limit', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'shrinkage_percentage', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'crossover_type', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'multi_point_binary', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'multi_point_parameterized_binary', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'multi_point_real', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'shuffle_random', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'num_parents', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'num_offspring', ' = ', '\n', dm.params)
+        param_write(fid, sbeg, 'crossover_rate', ' = ', '\n', dm.params)
+
+    #switch dm.method
+        if dm.method == 'moga':
+            param_write(fid, sbeg, 'fitness_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'niching_type', ' = ', '\n', dm.params)
+            if not isempty(dm.params.radial) and not isempty(dm.params.distance):
+                raise RuntimeError('  #s'' method must have only one niching distance.', dm.method)
+            param_write(fid, sbeg, 'radial', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'distance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'metric_tracker', '', '\n', dm.params)
+            param_write(fid, sbeg, 'percent_change', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'num_generations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'postprocessor_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'orthogonal_distance', ' = ', '\n', dm.params)
+
+        elif dm.method == 'soga':
+            param_write(fid, sbeg, 'fitness_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'constraint_penalty', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'replacement_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'convergence_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'num_generations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'percent_change', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'lsq':
+        #switch dm.method
+        if dm.method == 'nl2sol':
+            param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+            param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+            param_write(fid, sbeg, 'function_precision', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'absolute_conv_tol', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'x_conv_tol', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'singular_conv_tol', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'singular_radius', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'false_conv_tol', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'initial_trust_radius', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'covariance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'regression_stressbalances', '', '\n', dm.params)
+
+        elif dm.method == 'nlssol_sqp':
+            param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'constraint_tolerance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+            param_write(fid, sbeg, 'speculative', '', '\n', dm.params)
+            param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+            param_write(fid, sbeg, 'verify_level', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'function_precision', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'linesearch_tolerance', ' = ', '\n', dm.params)
+
+        elif dm.method == 'optpp_g_newton':
+            param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'max_function_evaluations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+            param_write(fid, sbeg, 'speculative', '', '\n', dm.params)
+            param_write(fid, sbeg, 'scaling', '', '\n', dm.params)
+
+            if (dm.params.value_based_line_search + dm.params.gradient_based_line_search + dm.params.trust_region + dm.params.tr_pds > 1):
+                raise RuntimeError('  #s'' method must have only one algorithm.', dm.method)
+
+            param_write(fid, sbeg, 'value_based_line_search', '', '\n', dm.params)
+            param_write(fid, sbeg, 'gradient_based_line_search', '', '\n', dm.params)
+            param_write(fid, sbeg, 'trust_region', '', '\n', dm.params)
+            param_write(fid, sbeg, 'tr_pds', '', '\n', dm.params)
+            param_write(fid, sbeg, 'max_step', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'gradient_tolerance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'merit_function', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'central_path', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'steplength_to_boundary', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'centering_parameter', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'nond':
+        #switch dm.method
+        if dm.method == 'nond_sampling':
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'fixed_seed', '', '\n', dm.params)
+            dver = str(IssmConfig('_DAKOTA_VERSION_')[0])
+            if ((int(dver[0]) == 4 and int(dver[2]) > 2) or int(dver[0]) > 4):
+                param_write(fid, sbeg, 'rng', '                ', '\n', dm.params)
+                param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+                param_write(fid, sbeg, 'sample_type', '        ', '\n', dm.params)
+                param_write(fid, sbeg, 'all_variables', '', '\n', dm.params)
+                param_write(fid, sbeg, 'variance_based_decomp', '', '\n', dm.params)
+                if strcmp(dm.params.sample_type, 'incremental_random') or strcmp(dm.params.sample_type, 'incremental_lhs'):
+                    param_write(fid, sbeg, 'previous_samples', ' = ', '\n', dm.params)
+                    param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+
+        elif dm.method == 'nond_local_reliability':
+            param_write(fid, sbeg, 'max_iterations', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'convergence_tolerance', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'mpp_search', ' = ', '\n', dm.params)
+            if type(dm.params.mpp_search) == str:
+                if (dm.params.sqp + dm.params.nip > 1):
+                    raise RuntimeError('  #s'' method must have only one algorithm.', dm.method)
+
+                param_write(fid, sbeg, 'sqp', '', '\n', dm.params)
+                param_write(fid, sbeg, 'nip', '', '\n', dm.params)
+                param_write(fid, sbeg, 'integration', '   ', '\n', dm.params)
+                param_write(fid, sbeg, 'refinement', ' = ', '\n', dm.params)
+                if type(dm.params.refinement) == str:
+                    param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+                    param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+                    param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+
+        elif dm.method == 'nond_global_reliability':
+            if (dm.params.x_gaussian_process + dm.params.u_gaussian_process != 1):
+                raise RuntimeError('  #s'' method must have one and only one algorithm.', dm.method)
+
+            param_write(fid, sbeg, 'x_gaussian_process', '', '\n', dm.params)
+            param_write(fid, sbeg, 'u_gaussian_process', '', '\n', dm.params)
+            param_write(fid, sbeg, 'all_variables', '', '\n', dm.params)
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+
+        elif dm.method == 'nond_polynomial_chaos':
+            param_write(fid, sbeg, 'expansion_order', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'expansion_terms', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'quadrature_order', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'sparse_grid_level', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'expansion_samples', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'incremental_lhs', '', '\n', dm.params)
+            param_write(fid, sbeg, 'collocation_points', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'collocation_ratio', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'reuse_samples', '', '\n', dm.params)
+            param_write(fid, sbeg, 'expansion_import_file', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'fixed_seed', '', '\n', dm.params)
+            param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'sample_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'all_variables', '', '\n', dm.params)
+
+        elif dm.method == 'nond_stoch_collocation':
+            param_write(fid, sbeg, 'quadrature_order', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'sparse_grid_level', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'fixed_seed', '', '\n', dm.params)
+            param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'sample_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'all_variables', '', '\n', dm.params)
+
+        elif dm.method == 'nond_evidence':
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'dace':
+        #switch dm.method
+        if dm.method == 'dace':
+            if (dm.params.grid + dm.params.random + dm.params.oas + dm.params.lhs + dm.params.oa_lhs + dm.params.box_behnken + dm.params.central_composite != 1):
+                raise RuntimeError('  #s'' method must have one and only one algorithm.', dm.method)
+
+            param_write(fid, sbeg, 'grid', '', '\n', dm.params)
+            param_write(fid, sbeg, 'random', '', '\n', dm.params)
+            param_write(fid, sbeg, 'oas', '', '\n', dm.params)
+            param_write(fid, sbeg, 'lhs', '', '\n', dm.params)
+            param_write(fid, sbeg, 'oa_lhs', '', '\n', dm.params)
+            param_write(fid, sbeg, 'box_behnken', '', '\n', dm.params)
+            param_write(fid, sbeg, 'central_composite', '', '\n', dm.params)
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'fixed_seed', '', '\n', dm.params)
+            param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'symbols', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'quality_metrics', '', '\n', dm.params)
+            param_write(fid, sbeg, 'variance_based_decomp', '', '\n', dm.params)
+
+        elif dm.method == 'fsu_quasi_mc':
+            if (dm.params.halton + dm.params.hammersley != 1):
+                raise RuntimeError('  #s'' method must have one and only one sequence type.', dm.method)
+
+            param_write(fid, sbeg, 'halton', '', '\n', dm.params)
+            param_write(fid, sbeg, 'hammersley', '', '\n', dm.params)
+            param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'sequence_start', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'sequence_leap', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'prime_base', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'fixed_sequence', '', '\n', dm.params)
+            param_write(fid, sbeg, 'latinize', '', '\n', dm.params)
+            param_write(fid, sbeg, 'variance_based_decomp', '', '\n', dm.params)
+            param_write(fid, sbeg, 'quality_metrics', '', '\n', dm.params)
+
+        elif dm.method == 'fsu_cvt':
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'fixed_seed', '', '\n', dm.params)
+            param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'num_trials', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'trial_type', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'latinize', '', '\n', dm.params)
+            param_write(fid, sbeg, 'variance_based_decomp', '', '\n', dm.params)
+            param_write(fid, sbeg, 'quality_metrics', '', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'param':
+        param_write(fid, sbeg, 'output', ' ', '\n', dm.params)
+    #switch dm.method
+        if dm.method == 'vector_parameter_study':
+            if not np.logical_xor(isempty(dm.params.final_point), isempty(dm.params.step_vector)):
+                raise RuntimeError(str(dm.method) + ' method must have one and only one specification.')
+
+            if not isempty(dm.params.final_point):
+                param_write(fid, sbeg, 'final_point', ' = ', '\n', dm.params)
+                param_write(fid, sbeg, 'step_length', ' = ', '\n', dm.params)
+                param_write(fid, sbeg, 'num_steps', ' = ', '\n', dm.params)
+
+            elif not isempty(dm.params.step_vector):
+                param_write(fid, sbeg, 'step_vector', ' = ', '\n', dm.params)
+                param_write(fid, sbeg, 'num_steps', ' = ', '\n', dm.params)
+
+        elif dm.method == 'list_parameter_study':
+            param_write(fid, sbeg, 'list_of_points', ' = ', '\n', dm.params)
+
+        elif dm.method == 'centered_parameter_study':
+            param_write(fid, sbeg, 'percent_delta', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'deltas_per_variable', ' = ', '\n', dm.params)
+
+        elif dm.method == 'multidim_parameter_study':
+            param_write(fid, sbeg, 'partitions', ' = ', '\n', dm.params)
+
+        else:
+            raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+    elif dm.type == 'bayes':
+        #switch dm.method
+        if dm.method == 'bayes_calibration':
+            # if (dm.params.queso +
+            #    dm.params.dream +
+            #     dm.params.gpmsa ~= 1)
+            #    raise RuntimeError('''  #s'' method must have one and only one bayes type. YOU SUCK',
+            #       dm.method)
+            #
+            param_write(fid, sbeg, 'queso', '', '\n', dm.params)
+            param_write(fid, sbeg, 'dream', '', '\n', dm.params)
+            param_write(fid, sbeg, 'gpmsa', '', '\n', dm.params)
+            param_write(fid, sbeg, 'samples', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'seed', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'output', ' = ', '\n', dm.params)
+            param_write(fid, sbeg, 'metropolis_hastings', '', '\n', dm.params)
+            param_write(fid, sbeg, 'proposal_covariance', '', '\n', dm.params)
+            param_write(fid, sbeg, 'diagonal', '', '\n', dm.params)
+            param_write(fid, sbeg, 'values', ' = ', '\n', dm.params)
+
+    else:
+        raise RuntimeError('Unrecognized ' + dm.type + ' method: ' + dm.method + '.')
+
+
+#  function to write a structure of parameters
+def param_struc_write(fidi, sbeg, smid, s, params):
+    #  loop through each parameter field in the structure
+    fnames = fieldnames(params)
+    for i in range(np.size(fnames)):
+        param_write(fidi, sbeg, fnames[i], smid, s, params)
+
+    return
+
+
+#  function to write a parameter
+def param_write(fidi, sbeg, pname, smid, s, params):
+    #  check for errors
+    if not isfield(params, pname):
+        warning('param_write:param_not_found', 'Parameter ' + str(pname) + ' not found in ' + params + '.')
+        return
+    elif type(vars(params)[pname]) == bool and not vars(params)[pname]:
+        return
+    elif isempty(vars(params)[pname]):
+        print('Warning: param_write:param_empty: Parameter {} requires input of type {}.'.format(pname, type(vars(params)[pname])))
+        return
+
+    #  construct the parameter string based on type
+    if type(vars(params)[pname]) == bool:
+        fidi.write(sbeg + str(pname) + s)
+
+    elif type(vars(params)[pname]) in [int, float]:
+        fidi.write(sbeg + str(pname) + smid + str(vars(params)[pname]) + s)
+
+    elif type(vars(params)[pname]) == list:
+        fidi.write(sbeg + str(pname) + smid + str(vars(params)[pname][0]))
+        for i in range(1, np.size(vars(params)[pname])):
+            fidi.write(' ' + str(vars(params)[pname][i]))
+
+        fidi.write(s)
+
+    elif type(vars(params)[pname]) == str:
+        fidi.write(sbeg + str(pname) + smid + str(vars(params)[pname]) + s)
+
+    else:
+        print('Warning: param_write:param_unrecog: Parameter {} is of unrecognized type {}.'.format(pname, type(vars(params)[pname])))
+        return
Index: /issm/trunk/src/m/classes/qmu/histogram_bin_uncertain.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/histogram_bin_uncertain.m	(revision 24686)
+++ /issm/trunk/src/m/classes/qmu/histogram_bin_uncertain.m	(revision 24686)
@@ -0,0 +1,141 @@
+% %  definition for the histogram_bin_uncertain class.
+%
+%  [hbu]=histogram_bin_uncertain(varargin)
+%
+%  where the required varargin are:
+%    descriptor    (char, description, '')
+%    pairs_per_variable          (double vector, [])
+%    abscissas          (double vector, [])
+%    counts          (int vector, [])
+%
+%  note that zero arguments constructs a default instance; one
+%  argument of the class copies the instance; and three or more
+%  arguments constructs a new instance from the arguments.
+%
+classdef histogram_bin_uncertain
+    properties
+        descriptor='';
+		pairs_per_variable=[];
+        abscissas = [];
+        counts = [];
+    end
+
+    methods
+        function [hbu]=histogram_bin_uncertain(varargin) % {{{
+
+            switch nargin
+                case 0 %  create a default object
+                case 1 %  copy the object
+                    if isa(varargin{1},'histogram_bin_uncertain')
+                        hbu=varargin{1};
+                    else
+                        error('Object ''%s'' is a ''%s'' class object, not ''%s''.',...
+                            inputname(1),class(varargin{1}),'histogram_bin_uncertain');
+                    end
+                case {2,3} %  not enough arguments
+                    error('Construction of ''%s'' class object requires at least %d inputs.',...
+                        'histogram_bin_uncertain',4)
+                case 4 % 
+					%  create the object from the input
+					hbu = histogram_bin_uncertain; 
+					hbu.descriptor=varargin{1};
+					hbu.pairs_per_variable=varargin{2};
+					hbu.abscissas=varargin{3};
+					hbu.counts=varargin{4};
+
+                otherwise 
+					error('Construction of histogram_bin_uncertain class object requires three arguments, descriptor, abscissas and counts');
+            end
+
+        end % }}}
+        function []=disp(hbu) % {{{
+
+%  display the object
+
+            disp(sprintf('\n'));
+            for i=1:numel(hbu)
+                disp(sprintf('class ''%s'' object ''%s%s'' = \n',...
+                    class(hbu),inputname(1),string_dim(hbu,i)));
+                disp(sprintf('    descriptor: ''%s'''  ,hbu(i).descriptor));
+                disp(sprintf('          pairs_per_variable: %g'      ,hbu(i).pairs_per_variable));
+                disp(sprintf('          abscissas: %g'      ,hbu(i).abscissas));
+                disp(sprintf('        counts: %g'      ,hbu(i).counts));
+            end
+
+        end % }}}
+        function [desc]  =prop_desc(hbu,dstr) % {{{ 
+            desc=cell(1,numel(hbu));
+            for i=1:numel(hbu)
+                if ~isempty(hbu(i).descriptor)
+                    desc(i)=cellstr(hbu(i).descriptor);
+                elseif ~isempty(inputname(1))
+                    desc(i)=cellstr([inputname(1) string_dim(hbu,i,'vector')]);
+                elseif exist('dstr','var')
+                    desc(i)=cellstr([dstr         string_dim(hbu,i,'vector')]);
+                else
+                    desc(i)=cellstr(['hbu'        string_dim(hbu,i,'vector')]);
+                end
+            end
+            desc=allempty(desc);
+        end  %}}}
+        function [initpt]=prop_initpt(hbu) % {{{
+            initpt=[];
+        end % }}}
+        function [lower] =prop_lower(hbu) % {{{
+            lower=[];
+        end % }}}
+        function [upper] =prop_upper(hbu) % {{{
+            upper=[];
+        end % }}}
+        function [mean]  =prop_mean(hbu) % {{{
+            mean=[];
+        end % }}}
+        function [stddev]=prop_stddev(hbu) % {{{
+            stddev=[];
+        end % }}}
+        function [initst]=prop_initst(hbu) % {{{ 
+            initst=[];
+        end % }}}
+        function [stype] =prop_stype(hbu) % {{{
+            stype={};
+        end % }}}
+        function [scale] =prop_scale(hbu) % {{{
+            scale=[]; 
+        end % }}}
+		function [abscissas] =prop_abscissas(hbu) % {{{
+		abscissas=[]; 
+		for i=1:numel(hbu)
+			abscissas=[abscissas hbu(i).abscissas];
+		end
+		abscissas=allequal(abscissas,-Inf);
+
+        end % }}}
+		function [pairs_per_variable] =prop_pairs_per_variable(hbu) % {{{
+			pairs_per_variable=zeros(1,numel(hbu));
+            for i=1:numel(hbu)
+                pairs_per_variable(i)=hbu(i).pairs_per_variable;
+            end
+            pairs_per_variable=allequal(pairs_per_variable,-Inf);
+        end % }}}
+   		function [counts] =prop_counts(hbu) % {{{
+		counts=[]; 
+		for i=1:numel(hbu)
+			counts=[counts hbu(i).counts];
+		end
+		counts=allequal(counts,-Inf);
+
+        end % }}}
+	end
+    methods (Static)
+        function []=dakota_write(fidi,dvar) % {{{
+
+%  collect only the variables of the appropriate class
+
+            hbu=struc_class(dvar,'histogram_bin_uncertain');
+
+%  write variables
+
+            vlist_write(fidi,'histogram_bin_uncertain','hbu',hbu);
+        end % }}}
+    end
+end
Index: /issm/trunk/src/m/classes/qmu/normal_uncertain.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/normal_uncertain.m	(revision 24685)
+++ /issm/trunk/src/m/classes/qmu/normal_uncertain.m	(revision 24686)
@@ -192,4 +192,14 @@
             scale=[];
         end
+		function [abscissas] =prop_abscissas(hbu) % {{{
+            abscissas=[]; 
+        end % }}}
+        function [counts] =prop_counts(hbu) % {{{
+            counts=[]; 
+        end % }}}
+        function [pairs_per_variable] =prop_pairs_per_variable(hbu) % {{{
+			pairs_per_variable=[];
+        end % }}}
+
     end
 
Index: /issm/trunk/src/m/classes/qmu/response_function.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/response_function.m	(revision 24685)
+++ /issm/trunk/src/m/classes/qmu/response_function.m	(revision 24686)
@@ -43,10 +43,8 @@
             switch nargin
 
-%  create a default object
-
+					%  create a default object
                 case 0
 
-%  copy the object or create the object from the input
-
+						 %  copy the object or create the object from the input
                 otherwise
                     if  (nargin == 1) && isa(varargin{1},'response_function')
@@ -170,22 +168,18 @@
         function [rdesc]=dakota_write(fidi,dresp,rdesc)
 
-%  collect only the responses of the appropriate class
-
+			  %  collect only the responses of the appropriate class
             rf=struc_class(dresp,'response_function');
 
-%  write responses
+				%  write responses
+				[rdesc]=rlist_write(fidi,'response_functions','response_function',rf,rdesc);
+			end
 
-            [rdesc]=rlist_write(fidi,'response_functions','response_function',rf,rdesc);
-        end
+			function []=dakota_rlev_write(fidi,dresp,params)
 
-        function []=dakota_rlev_write(fidi,dresp,params)
+				%  collect only the responses of the appropriate class
+				rf=struc_class(dresp,'response_function');
 
-%  collect only the responses of the appropriate class
-
-            rf=struc_class(dresp,'response_function');
-
-%  write response levels
-
-            rlev_write(fidi,rf,params);
+				%  write response levels
+				rlev_write(fidi,rf,params);
         end
     end
Index: /issm/trunk/src/m/classes/qmu/uniform_uncertain.m
===================================================================
--- /issm/trunk/src/m/classes/qmu/uniform_uncertain.m	(revision 24685)
+++ /issm/trunk/src/m/classes/qmu/uniform_uncertain.m	(revision 24686)
@@ -151,4 +151,14 @@
             scale=[];
         end
+		function [abscissas] =prop_abscissas(hbu) % {{{
+            abscissas=[]; 
+        end % }}}
+        function [counts] =prop_counts(hbu) % {{{
+            counts=[]; 
+        end % }}}
+        function [pairs_per_variable] =prop_pairs_per_variable(hbu) % {{{
+			pairs_per_variable=[];
+        end % }}}
+
     end
 
Index: /issm/trunk/src/m/classes/slr.js
===================================================================
--- /issm/trunk/src/m/classes/slr.js	(revision 24685)
+++ /issm/trunk/src/m/classes/slr.js	(revision 24686)
@@ -7,7 +7,7 @@
 	//methods
 		this.setdefaultparameters = function (){ //{{{
-		
+
 		//Convergence criterion: absolute, relative and residual
-		this.reltol=0.01; // 1 per cent 
+		this.reltol=0.01; // 1 per cent
 		this.abstol=NaN;  //default
 
@@ -15,43 +15,45 @@
 		this.maxiter=5;
 
-		//computational flags: 
+		//computational flags:
 		this.rigid=1;
 		this.elastic=1;
 		this.rotation=0;
 		this.ocean_area_scaling=0;
-		
-		//tidal love numbers: 
+
+		//tidal love numbers:
 		this.tide_love_h=0.6149; //degree 2
 		this.tide_love_k=0.3055; //degree 2
-	
-		//secular fluid love number: 
-		this.fluid_love=0.942; 
-		
-		//moment of inertia: 
-		this.equatorial_moi=8.0077*10^37; // [kg m^2] 
-		this.polar_moi		 =8.0345*10^37; // [kg m^2] 
+
+		//secular fluid love number:
+		this.fluid_love=0.942;
+
+		//moment of inertia:
+		this.equatorial_moi=8.0077*10^37; // [kg m^2]
+		this.polar_moi		 =8.0345*10^37; // [kg m^2]
 
 		// mean rotational velocity of earth:
-		this.angular_velocity=7.2921*10^-5; // [s^-1] 
+		this.angular_velocity=7.2921*10^-5; // [s^-1]
 
 		//numerical discretization accuracy
 		this.degacc=.01;
-		
-		//steric: 
+
+		//steric:
 		this.steric_rate=0;
-		
-		
+
+
 		//output default:
 		this.requested_outputs=['default'];
 
-		//transitions should be a cell array of vectors: 
+		//transitions should be a cell array of vectors:
 		this.transitions=[];
-		
+
 		}// }}}
 		this.checkconsistency = function(md,solution,analyses) { //{{{
 
 			//Early return
-			if(ArrayAnyEqual(ArrayIsMember('SealevelriseAnalysis',analyses),0))return;
-			
+			if (ArrayAnyEqual(ArrayIsMember('SealevelriseAnalysis',analyses),0) || ArrayAnyEqual(ArrayIsMember('TransientSolution',analyses),0) && !md.transient.isslr) {
+				return;
+			}
+
 			md = checkfield(md,'fieldname','slr.deltathickness','NaN',1,'Inf',1,'size',[md.mesh.numberofelements, 1]);
 			md = checkfield(md,'fieldname','slr.sealevel','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices, 1]);
@@ -71,6 +73,6 @@
 			md = checkfield(md,'fieldname','slr.degacc','size',[1, 1],'>=',1e-10);
 			md = checkfield(md,'fieldname','slr.requested_outputs','stringrow',1);
-			
-			//check that love numbers are provided at the same level of accuracy: 
+
+			//check that love numbers are provided at the same level of accuracy:
 			if (this.love_h.length != this.love_k.length || this.love_h.length != this.love_l.length){
 				throw Error('slr error message: love numbers should be provided at the same level of accuracy');
@@ -85,5 +87,5 @@
 		}// }}}
 		this.disp= function(){// {{{
-			
+
 			console.log(sprintf('   Sealevelrise solution parameters:'));
 
@@ -95,5 +97,5 @@
 		fielddisplay(this,'love_h','load Love number for radial displacement');
 		fielddisplay(this,'love_k','load Love number for gravitational potential perturbation');
-		fielddisplay(this,'love_l','load Love number for horizontal displacements'); 
+		fielddisplay(this,'love_l','load Love number for horizontal displacements');
 		fielddisplay(this,'tide_love_h','tidal love number (degree 2)');
 		fielddisplay(this,'tide_love_k','tidal love number (degree 2)');
@@ -101,13 +103,13 @@
 		fielddisplay(this,'equatorial_moi','mean equatorial moment of inertia [kg m^2]');
 		fielddisplay(this,'polar_moi','polar moment of inertia [kg m^2]');
-		fielddisplay(this,'angular_velocity','mean rotational velocity of earth [per second]'); 
+		fielddisplay(this,'angular_velocity','mean rotational velocity of earth [per second]');
 		fielddisplay(this,'rigid','rigid earth graviational potential perturbation');
 		fielddisplay(this,'elastic','elastic earth graviational potential perturbation');
 		fielddisplay(this,'rotation','rotational earth potential perturbation');
-		fielddisplay(this,'ocean_area_scaling','correction for model representation of ocean area [default: No correction]'); 
+		fielddisplay(this,'ocean_area_scaling','correction for model representation of ocean area [default: No correction]');
 		fielddisplay(this,'degacc',"accuracy (default .01 deg) for numerical discretization of the Green's functions");
 		fielddisplay(this,'transitions','indices into parts of the mesh that will be icecaps');
 		fielddisplay(this,'requested_outputs','additional outputs requested');
-		fielddisplay(this,'steric_rate','rate of steric ocean expansion [mm/yr]'); 
+		fielddisplay(this,'steric_rate','rate of steric ocean expansion [mm/yr]');
 		} //}}}
 		this.marshall=function(md,prefix,fid) { //{{{
@@ -171,5 +173,5 @@
 	//{{{
 	this.deltathickness = NaN;
-	this.sealevel       = NaN; 
+	this.sealevel       = NaN;
 	this.maxiter        = 0;
 	this.reltol         = 0;
@@ -178,15 +180,15 @@
 	this.love_k         = 0; //idam
 	this.love_l         = 0; //idam
-	this.tide_love_h    = 0; 
-	this.tide_love_k    = 0; 
-	this.fluid_love	  = 0; 
-	this.equatorial_moi	= 0; 
-	this.polar_moi			= 0; 
-	this.angular_velocity = 0; 
+	this.tide_love_h    = 0;
+	this.tide_love_k    = 0;
+	this.fluid_love	  = 0;
+	this.equatorial_moi	= 0;
+	this.polar_moi			= 0;
+	this.angular_velocity = 0;
 	this.rigid          = 0;
 	this.elastic        = 0;
 	this.rotation       = 0;
 	this.ocean_area_scaling = 0;
-	this.steric_rate    = 0; //rate of ocean expansion from steric effects. 
+	this.steric_rate    = 0; //rate of ocean expansion from steric effects.
 	this.degacc         = 0;
 	this.requested_outputs = [];
Index: /issm/trunk/src/m/classes/slr.m
===================================================================
--- /issm/trunk/src/m/classes/slr.m	(revision 24685)
+++ /issm/trunk/src/m/classes/slr.m	(revision 24686)
@@ -25,5 +25,4 @@
 		rotation               = 0;
 		ocean_area_scaling     = 0;
-		steric_rate            = 0; %rate of ocean expansion from steric effects.
 		hydro_rate             = 0; %rate of steric expansion from hydrological effects.
 		geodetic_run_frequency = 1; %how many time steps we skip before we run the geodetic part of the solver during transient
@@ -34,4 +33,5 @@
 		Ngia                   = NaN;
 		Ugia                   = NaN;
+
 		requested_outputs      = {};
 		transitions            = {};
@@ -80,6 +80,5 @@
 		self.degacc=.01;
 
-		%steric: 
-		self.steric_rate=0;
+		%hydro
 		self.hydro_rate=0;
 	
@@ -99,5 +98,7 @@
 		function md = checkconsistency(self,md,solution,analyses) % {{{
 
-			if ~ismember('SealevelriseAnalysis',analyses), return; end
+			if ~ismember('SealevelriseAnalysis',analyses) | (strcmp(solution,'TransientSolution') & md.transient.isslr==0), 
+				return; 
+			end
 			md = checkfield(md,'fieldname','slr.deltathickness','NaN',1,'Inf',1,'size',[md.mesh.numberofelements 1]);
 			md = checkfield(md,'fieldname','slr.sealevel','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
@@ -116,5 +117,4 @@
 			md = checkfield(md,'fieldname','slr.maxiter','size',[1 1],'>=',1);
 			md = checkfield(md,'fieldname','slr.geodetic_run_frequency','size',[1 1],'>=',1);
-			md = checkfield(md,'fieldname','slr.steric_rate','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
 			md = checkfield(md,'fieldname','slr.hydro_rate','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices 1]);
 			md = checkfield(md,'fieldname','slr.degacc','size',[1 1],'>=',1e-10);
@@ -176,5 +176,4 @@
 			fielddisplay(self,'angular_velocity','mean rotational velocity of earth [per second]'); 
 			fielddisplay(self,'ocean_area_scaling','correction for model representation of ocean area [default: No correction]'); 
-			fielddisplay(self,'steric_rate','rate of steric ocean expansion (in mm/yr)'); 
 			fielddisplay(self,'hydro_rate','rate of hydrological expansion (in mm/yr)'); 
 			fielddisplay(self,'Ngia','rate of viscous (GIA) geoid expansion (in mm/yr)'); 
@@ -214,5 +213,4 @@
 			WriteData(fid,prefix,'object',self,'fieldname','ocean_area_scaling','format','Boolean');
 			WriteData(fid,prefix,'object',self,'fieldname','geodetic_run_frequency','format','Integer');
-			WriteData(fid,prefix,'object',self,'fieldname','steric_rate','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
 			WriteData(fid,prefix,'object',self,'fieldname','hydro_rate','format','DoubleMat','mattype',1,'timeserieslength',md.mesh.numberofvertices+1,'yts',md.constants.yts,'scale',1e-3/md.constants.yts);
 			WriteData(fid,prefix,'object',self,'fieldname','Ngia','format','DoubleMat','mattype',1,'scale',1e-3/md.constants.yts);
@@ -256,5 +254,4 @@
 			writejsdouble(fid,[modelname '.slr.ocean_area_scaling'],self.ocean_area_scaling);
 			writejsdouble(fid,[modelname '.slr.geodetic_run_frequency'],self.geodetic_run_frequency);
-			writejs1Darray(fid,[modelname '.slr.steric_rate'],self.steric_rate);
 			writejs1Darray(fid,[modelname '.slr.hydro_rate'],self.hydro_rate);
 			writejsdouble(fid,[modelname '.slr.degacc'],self.degacc);
Index: /issm/trunk/src/m/classes/slr.py
===================================================================
--- /issm/trunk/src/m/classes/slr.py	(revision 24685)
+++ /issm/trunk/src/m/classes/slr.py	(revision 24686)
@@ -127,5 +127,5 @@
     def checkconsistency(self, md, solution, analyses):  # {{{
         #Early return
-        if (solution != 'SealevelriseAnalysis'):
+        if (solution != 'SealevelriseAnalysis') or (solution == 'TransientSolution' and not md.transient.isslr):
             return md
 
Index: /issm/trunk/src/m/classes/thermal.m
===================================================================
--- /issm/trunk/src/m/classes/thermal.m	(revision 24685)
+++ /issm/trunk/src/m/classes/thermal.m	(revision 24686)
@@ -93,5 +93,5 @@
 				md = checkfield(md,'fieldname','thermal.watercolumn_upperlimit','>=',0);
 
-				%Make sure the spc are less than melting point
+				%Make sure the spc are less than melting point (Josh commented out the next 2 lines)
 				TEMP=md.thermal.spctemperature(1:md.mesh.numberofvertices,:);
 				replicate=repmat(md.geometry.surface-md.mesh.z,1,size(md.thermal.spctemperature,2));
Index: /issm/trunk/src/m/classes/thermal.py
===================================================================
--- /issm/trunk/src/m/classes/thermal.py	(revision 24685)
+++ /issm/trunk/src/m/classes/thermal.py	(revision 24686)
@@ -15,5 +15,5 @@
 
     def __init__(self):  # {{{
-        self.spctemperature = float('NaN')
+        self.spctemperature = np.nan
         self.penalty_threshold = 0
         self.stabilization = 0
@@ -107,5 +107,5 @@
             md = checkfield(md, 'fieldname', 'thermal.watercolumn_upperlimit', '>=', 0)
 
-            TEMP = md.thermal.spctemperature[:-1].flatten(-1)
+            TEMP = md.thermal.spctemperature[:-1].flatten()
             pos = np.where(~np.isnan(TEMP))
             try:
@@ -114,7 +114,7 @@
                 spccol = 1
 
-            replicate = np.tile(md.geometry.surface - md.mesh.z, (spccol)).flatten(-1)
+            replicate = np.tile(md.geometry.surface - md.mesh.z, (spccol)).flatten()
             control = md.materials.meltingpoint - md.materials.beta * md.materials.rho_ice * md.constants.g * replicate + 1.0e-5
-            md = checkfield(md, 'fieldname', 'thermal.spctemperature', 'field', md.thermal.spctemperature.flatten(-1)[pos], '<=', control[pos], 'message', "spctemperature should be below the adjusted melting point")
+            md = checkfield(md, 'fieldname', 'thermal.spctemperature', 'field', md.thermal.spctemperature.flatten()[pos], '<=', control[pos], 'message', "spctemperature should be below the adjusted melting point")
             md = checkfield(md, 'fieldname', 'thermal.isenthalpy', 'numel', [1], 'values', [0, 1])
             md = checkfield(md, 'fieldname', 'thermal.isdynamicbasalspc', 'numel', [1], 'values', [0, 1])
Index: /issm/trunk/src/m/classes/toolkits.m
===================================================================
--- /issm/trunk/src/m/classes/toolkits.m	(revision 24685)
+++ /issm/trunk/src/m/classes/toolkits.m	(revision 24686)
@@ -83,4 +83,21 @@
 			 analyses=properties(self);
 			 for i=1:numel(analyses),
+				 switch analyses{i}
+					 case 'DefaultAnalysis'
+					 case 'RecoveryAnalysis'
+					 case 'StressbalanceAnalysis'
+					 case 'MasstransportAnalysis'
+					 case 'ThermalAnalysis'
+					 case 'BalancethicknessAnalysis'
+					 case 'Balancethickness2Analysis'
+					 case 'BalancethicknessSoftAnalysis'
+					 case 'BalancevelocityAnalysis'
+					 case 'DamageEvolutionAnalysis'
+					 case 'LoveAnalysis'
+					 case 'EsaAnalysis'
+					 case 'SealevelriseAnalysis'
+					 otherwise
+						md = checkmessage(md,['md.toolkits.' analyses{i} ' not supported yet']);
+				 end
 				 if isempty(fieldnames(self.(analyses{i})))
 					 md = checkmessage(md,['md.toolkits.' analyses{i} ' is empty']);
Index: /issm/trunk/src/m/consistency/ismodelselfconsistent.m
===================================================================
--- /issm/trunk/src/m/consistency/ismodelselfconsistent.m	(revision 24685)
+++ /issm/trunk/src/m/consistency/ismodelselfconsistent.m	(revision 24686)
@@ -71,5 +71,5 @@
 		analyses={'EsaAnalysis'};
 	elseif strcmp(solutiontype,'TransientSolution')
-		analyses={'StressbalanceAnalysis','StressbalanceVerticalAnalysis','StressbalanceSIAAnalysis','L2ProjectionBaseAnalysis','ThermalAnalysis','MeltingAnalysis','EnthalpyAnalysis','MasstransportAnalysis','HydrologyShaktiAnalysis','HydrologyGladsAnalysis','HydrologyDCInefficientAnalysis','HydrologyDCEfficientAnalysis'};
+		analyses={'StressbalanceAnalysis','StressbalanceVerticalAnalysis','StressbalanceSIAAnalysis','L2ProjectionBaseAnalysis','ThermalAnalysis','MeltingAnalysis','EnthalpyAnalysis','MasstransportAnalysis','HydrologyShaktiAnalysis','HydrologyGladsAnalysis','HydrologyDCInefficientAnalysis','HydrologyDCEfficientAnalysis','SealevelriseAnalysis'};
 	elseif strcmp(solutiontype,'SealevelriseSolution')
 		analyses={'SealevelriseAnalysis'};
Index: /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.m
===================================================================
--- /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.m	(revision 24685)
+++ /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.m	(revision 24686)
@@ -1,4 +1,4 @@
-function export_netCDF(md,filename)	
-	
+function export_netCDF(md,filename)
+
 %Now going on Real treatment
 	if exist(filename),
@@ -17,7 +17,7 @@
 	mode = bitor(mode,netcdf.getConstant('NC_NOCLOBBER'));%NOCLOBBER to avoid overwrite
 	ncid = netcdf.create(filename,mode);
-	netcdf.putAtt(ncid,netcdf.getConstant('NC_GLOBAL'),'Title',['Results for run ' md.miscellaneous.name]);
-	netcdf.putAtt(ncid,netcdf.getConstant('NC_GLOBAL'),'Date',['Created ' datestr(now)]);
-	
+	netcdf.putAtt(ncid,netcdf.getConstant('NC_GLOBAL'),'description',['Results for run ' md.miscellaneous.name]);
+	netcdf.putAtt(ncid,netcdf.getConstant('NC_GLOBAL'),'history',['Created ' datestr(now)]);
+
 	%gather geometry and timestepping as dimensions
 	resfields=fieldnames(md.results);
@@ -27,8 +27,8 @@
 	else
 		StepNum=1;
-  end							
+  end
 
    dimlist=[40,2,md.mesh.numberofelements,md.mesh.numberofvertices,size(md.mesh.elements,2)];
- 
+
 	%define netcdf dimensions
 	DimSize(1).index=netcdf.defDim(ncid,'Dimension1',StepNum);
@@ -44,5 +44,5 @@
 
 	typelist=[{'numeric'} {'logical'} {'string'} {'char'} {'cell'}];
- 
+
 	%get all model classes and create respective groups
 	groups=fieldnames(md);
@@ -79,5 +79,5 @@
 						elseif isprop(Var{l},'step')
 							lname=Var{l}.step
-						else 
+						else
 							lname=[class(Var{l}) int2str(l)];
 						end
@@ -260,5 +260,5 @@
 		end
 	end
-	%if we have a cell variable we need to add a stringlength dimension 
+	%if we have a cell variable we need to add a stringlength dimension
 	if isa(Var,'struct'),
 		if DimValue(3)~=2
Index: /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.py
===================================================================
--- /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.py	(revision 24685)
+++ /issm/trunk/src/m/contrib/defleurian/netCDF/export_netCDF.py	(revision 24686)
@@ -80,5 +80,5 @@
                                     Var = md.__dict__[group].__dict__[field].__getitem__(listindex)[subfield]
                                 DimDict = CreateVar(NCData, Var, subfield, Listgroup, DimDict, md.__dict__[group], field, listindex)
-    # No subgroup, we directly treat the variable
+            # No subgroup, we directly treat the variable
             elif type(md.__dict__[group].__dict__[field]) in typelist or field == 'bamg':
                 NCgroup.__setattr__('classtype', md.__dict__[group].__class__.__name__)
@@ -87,6 +87,6 @@
             elif md.__dict__[group].__dict__[field] is None:
                 print('field md.{}.{} is None'.format(group, field))
-    # do nothing
-    # if it is a masked array
+            # do nothing
+            # if it is a masked array
             elif type(md.__dict__[group].__dict__[field]) is np.ma.core.MaskedArray:
                 NCgroup.__setattr__('classtype', md.__dict__[group].__class__.__name__)
@@ -175,8 +175,8 @@
         print(('WARNING type "{}" is unknown for "{}.{}"'.format(val_type, Group.name, field)))
     return DimDict
+
+
 # ============================================================================
-    # retriev the dimension tuple from a dictionnary
-
-
+# retriev the dimension tuple from a dictionnary
 def GetDim(NCData, val_shape, val_type, DimDict, val_dim):
     output = []
Index: /issm/trunk/src/m/contrib/defleurian/paraview/exportVTK.m
===================================================================
--- /issm/trunk/src/m/contrib/defleurian/paraview/exportVTK.m	(revision 24685)
+++ /issm/trunk/src/m/contrib/defleurian/paraview/exportVTK.m	(revision 24686)
@@ -53,7 +53,13 @@
 		if(size(sol_struct{i},2)>num_of_timesteps);
 			num_of_timesteps=size(sol_struct{i},2);
-      outstep=model.timestepping.time_step*model.settings.output_frequency;
-	  end
-  end
+			if isa(model.timestepping,'timesteppingadaptive')
+				disp('Warning: timesteppingadaptive not totally supported!');				
+			elseif isa(model.timestepping,'timestepping')
+				outstep=model.timestepping.time_step*model.settings.output_frequency;
+			else
+				error('timestepping class not supported!');
+			end
+		end
+	end
 else
 	num_of_timesteps=1;
Index: /issm/trunk/src/m/contrib/defleurian/paraview/exportVTK.py
===================================================================
--- /issm/trunk/src/m/contrib/defleurian/paraview/exportVTK.py	(revision 24685)
+++ /issm/trunk/src/m/contrib/defleurian/paraview/exportVTK.py	(revision 24686)
@@ -94,4 +94,5 @@
     if enveloppe:
         if dim == 3:
+            mesh_alti = '1'
             is_enveloppe = np.logical_or(md.mesh.vertexonbase, md.mesh.vertexonsurface)
             enveloppe_index = np.where(is_enveloppe)[0]
@@ -267,5 +268,5 @@
                 tensors = [field for field in fieldnames if field[-2:] in ['xx', 'yy', 'xy', 'zz', 'xz', 'yz']]
                 non_tensor = [field for field in fieldnames if field not in tensors]
-                vectors = [field for field in non_tensor if field[-1] in ['x', 'y', 'z']]
+                vectors = [field for field in non_tensor if field[-1] in ['x', 'y', 'z'] and field[-4:] not in ['Flux']]
 
     #check which field is a real result and print
Index: /issm/trunk/src/m/contrib/elmer/readelmermesh.m
===================================================================
--- /issm/trunk/src/m/contrib/elmer/readelmermesh.m	(revision 24686)
+++ /issm/trunk/src/m/contrib/elmer/readelmermesh.m	(revision 24686)
@@ -0,0 +1,21 @@
+function md=readelmermesh(domainfile) % read an Elmer mesh (<domainfile>.nodes / <domainfile>.elements) into an ISSM model
+
+	%Read node file
+	filename = [domainfile '.nodes'];
+	disp(['Reading ' filename]);
+	nodes = load(filename);
+
+	%Get coordinates (Elmer .nodes format: node-id, tag, x, y[, z] -- TODO confirm column layout against Elmer docs)
+	x = nodes(:,3);
+	y = nodes(:,4);
+
+	%Read element file
+	filename = [domainfile '.elements'];
+	disp(['Reading ' filename]);
+	elements = load(filename);
+
+	%Get indices (assumes linear-triangle elements: the three vertex ids sit in columns 4:6 -- TODO confirm)
+	index = elements(:,4:6);
+
+	%Convert mesh
+	md=meshconvert(model,index,x,y); % 'model' (no parens needed) constructs a fresh model for meshconvert to fill
Index: /issm/trunk/src/m/contrib/larour/legendd.m
===================================================================
--- /issm/trunk/src/m/contrib/larour/legendd.m	(revision 24686)
+++ /issm/trunk/src/m/contrib/larour/legendd.m	(revision 24686)
@@ -0,0 +1,27 @@
+function legendd(varargin) % hand-drawn legend: rectangle at (x,y) of size w-by-h holding one colored line sample per label
+
+	options=pairoptions(varargin{:});
+
+	%retrieve arguments: 
+	x=getfieldvalue(options,'x',0); 
+	y=getfieldvalue(options,'y',0);
+	w=getfieldvalue(options,'w',1);
+	h=getfieldvalue(options,'h',1);
+	facecolor=getfieldvalue(options,'FaceColor','w');
+	edgecolor=getfieldvalue(options,'EdgeColor','k');
+	strings=getfieldvalue(options,'strings',{}); % legend labels; 'colors' must have the same length -- not checked here
+	colors=getfieldvalue(options,'colors',{});
+	fontsize=getfieldvalue(options,'FontSize',12);
+	linewidth=getfieldvalue(options,'LineWidth',2);
+
+	hold on;
+	rectangle('Position',[x,y,w,h],'FaceColor',facecolor,'EdgeColor',edgecolor);
+	
+	nl=length(strings);
+	for i=1:nl,
+		l=line([x+w/6 x+w/3],[y+(nl+1-i)*h/(nl+1) y+(nl+1-i)*h/(nl+1)]); % sample line for entry i, stacked top (i=1) to bottom
+		set(l,'Color',colors{i});
+		set(l,'LineWidth',linewidth);
+		text(x+1.3*w/3,y+(nl+1-i)*h/(nl+1),strings{i},'FontSize',fontsize); % label to the right of its sample line
+	end
+
Index: /issm/trunk/src/m/contrib/morlighem/ad/rescalegradient.m
===================================================================
--- /issm/trunk/src/m/contrib/morlighem/ad/rescalegradient.m	(revision 24686)
+++ /issm/trunk/src/m/contrib/morlighem/ad/rescalegradient.m	(revision 24686)
@@ -0,0 +1,53 @@
+function grad_out = rescalegradient(md,grad_in);
+%RESCALEGRADIENT - rescale gradient using mass matrix
+%
+%   Usage:
+%      grad_out = rescalegradient(md,grad_in);
+
+	%Define index
+	index = md.mesh.elements;
+
+	%Get surface areas of all elements
+	A = GetAreas(index,md.mesh.x,md.mesh.y);
+
+	%Preallocate to speed up computation (9 entries per triangle; column vectors, not N-by-N matrices)
+	disp('Constructing mass matrix...');
+	tic
+	row   = zeros(9*md.mesh.numberofelements,1);
+	col   = zeros(9*md.mesh.numberofelements,1);
+	value = zeros(9*md.mesh.numberofelements,1);
+
+	%Construct mass matrix using MATLAB's sparse function
+	count = 0;
+	for n=1:md.mesh.numberofelements
+		for l=1:3
+			for k=1:3
+				count=count+1;
+				row(count) = index(n,k);
+				col(count) = index(n,l);
+				if l == k
+					value(count) = A(n)/6.;  % \int_E phi_i * phi_i dE = A/6
+				else
+					value(count) = A(n)/12.; % \int_E phi_i * phi_j dE = A/12
+				end
+			end
+		end
+	end
+
+	%Delete unused elements (defensive trim; count == 9*numberofelements here)
+	row = row(1:count);
+	col = col(1:count);
+	value = value(1:count);
+
+	%Make mass matrix (duplicate (i,j) pairs are summed by sparse)
+	M=sparse(row,col,value);
+	toc
+
+	tic
+	disp('Solving...');
+	grad_out = M\grad_in;
+	toc
+
+	disp('Adjusting output');
+	pos = find(grad_in==0);
+	grad_out(pos) = 0; % zero output where input gradient is zero (original '==' was a discarded comparison, a no-op)
Index: /issm/trunk/src/m/contrib/morlighem/modeldata/interpMouginotAntTimeSeries1973to2018.m
===================================================================
--- /issm/trunk/src/m/contrib/morlighem/modeldata/interpMouginotAntTimeSeries1973to2018.m	(revision 24685)
+++ /issm/trunk/src/m/contrib/morlighem/modeldata/interpMouginotAntTimeSeries1973to2018.m	(revision 24686)
@@ -1,3 +1,3 @@
-function [vxout vyout]= interpMouginotAntTimeSeries1973to2018(X,Y,T)
+function [vxout vyout errxout erryout stdxout stdyout]= interpMouginotAntTimeSeries1973to2018(X,Y,T)
 %INTERPMOUGINOTANTTIMESERIES1973TO2018 - interpolate observed (time series) velocities 
 %
@@ -45,6 +45,8 @@
 %      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986 1988; 1991 1992; 1995 1996; 2000 2001]);
 %
-%      Another example:
+%      Other examples:
 %      [vxout vyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1973 1975; 1973 1988; 1991 1992; 2011 2012]);
+%      [vel]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986; 1991; 1995; 2000]);
+%      [vxout vyout errxout erryout stdxout stdyout]= interpMouginotAntTimeSeries1973to2018(md.mesh.x,md.mesh.y,[1986; 1991; 1995; 2000]);
 
 %read data
@@ -119,4 +121,8 @@
 	error('nargin not supported yet!');
 end
+if nargout~=1 & nargout~=2 & nargout~=6
+	error('nargout not supported!');
+end
+
 
 % get the spatial positions
@@ -136,4 +142,10 @@
 vxdata = [];
 vydata = [];
+if nargout==6 % it includes ERRX, ERRY, STDX and STDY
+	errxdata = [];
+	errydata = [];
+	stdxdata = [];
+	stdydata = [];
+end
 for i=1:length(pos), 
 	disp(['      step = ' int2str(i) '/' int2str(length(pos)) ', position = ' int2str(pos(i)) ', year = '  int2str(year1(pos(i))) ' - ' int2str(year2(pos(i)))]);
@@ -142,4 +154,14 @@
 	vxdata(:,:,i) = permute(vx,[2 1 3]);
 	vydata(:,:,i) = permute(vy,[2 1 3]);
+	if nargout==6 % it includes ERRX, ERRY, STDX and STDY
+		errx = double(ncread(nc,'ERRX',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));
+		erry = double(ncread(nc,'ERRY',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));	
+		stdx = double(ncread(nc,'STDX',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));
+		stdy = double(ncread(nc,'STDY',[id1x id1y pos(i)],[id2x-id1x+1 id2y-id1y+1 1],[1 1 1]));	
+		errxdata(:,:,i) = permute(errx,[2 1 3]);
+		errydata(:,:,i) = permute(erry,[2 1 3]);
+		stdxdata(:,:,i) = permute(stdx,[2 1 3]);
+		stdydata(:,:,i) = permute(stdy,[2 1 3]);
+	end
 end
 xdata=xdata(id1x:id2x);
@@ -149,8 +171,20 @@
 vxout = [];
 vyout = [];
+if nargout==6 % it includes ERRX, ERRY, STDX and STDY
+	errxout = [];
+	erryout = [];
+	stdxout = [];
+	stdyout = [];
+end
 for i=1:length(pos),
 	disp(['      step = ' int2str(i) '/' int2str(length(pos)) ', position = ' int2str(pos(i)) ', year = '  int2str(year1(pos(i))) ' - ' int2str(year2(pos(i)))]);
 	vxout = [vxout InterpFromGrid(xdata,ydata,vxdata(:,:,i),double(X),double(Y))];
 	vyout = [vyout InterpFromGrid(xdata,ydata,vydata(:,:,i),double(X),double(Y))];
+	if nargout==6 % it includes ERRX, ERRY, STDX and STDY
+		errxout = [errxout InterpFromGrid(xdata,ydata,errxdata(:,:,i),double(X),double(Y))];
+		erryout = [erryout InterpFromGrid(xdata,ydata,errydata(:,:,i),double(X),double(Y))];
+		stdxout = [stdxout InterpFromGrid(xdata,ydata,stdxdata(:,:,i),double(X),double(Y))];
+		stdyout = [stdyout InterpFromGrid(xdata,ydata,stdydata(:,:,i),double(X),double(Y))];
+	end
 end
 
Index: /issm/trunk/src/m/contrib/morlighem/modeldata/interpPaolo2015.m
===================================================================
--- /issm/trunk/src/m/contrib/morlighem/modeldata/interpPaolo2015.m	(revision 24685)
+++ /issm/trunk/src/m/contrib/morlighem/modeldata/interpPaolo2015.m	(revision 24686)
@@ -1,4 +1,6 @@
 function [dh_raw_out dh_fil_out T_out] = interpPaolo2015(X,Y,T,method)
 %INTERPPAOLO2015 - interpolate observed (time series) height change [m]
+%
+%   Time series are average height changes [m] with respect to 1994 every three months (72 time steps)
 %
 %   Inputs
@@ -148,5 +150,9 @@
 	if size(T,2)>1 | size(T,1)<1 | size(T,2)<1,
 		error('Size of input T not supported!');
-	end %}}}
+	end 
+	if size(X,1)>1 & size(X,2)>1
+		error('Size of input X not supported! X and Y should be vectors');
+	end
+	%}}}
 	% Loop over T
 	pos = [];
Index: /issm/trunk/src/m/contrib/tsantos/mismip/gl_position.m
===================================================================
--- /issm/trunk/src/m/contrib/tsantos/mismip/gl_position.m	(revision 24685)
+++ /issm/trunk/src/m/contrib/tsantos/mismip/gl_position.m	(revision 24686)
@@ -2,13 +2,20 @@
 
 		%initialization of some variables
-		data					= md.results.TransientSolution(step).MaskGroundediceLevelset;
-		if(isfield(md.results.TransientSolution,'MeshElements'))
-			index					= md.results.TransientSolution(step).MeshElements;
-			x						= md.results.TransientSolution(step).MeshX;
-			y						= md.results.TransientSolution(step).MeshY;
+		if isfield(md.results,'TransientSolution')
+			data		= md.results.TransientSolution(step).MaskGroundediceLevelset;
+			if isfield(md.results.TransientSolution,'MeshElements')
+				index	= md.results.TransientSolution(step).MeshElements;
+				x		= md.results.TransientSolution(step).MeshX;
+				y		= md.results.TransientSolution(step).MeshY;
+			else
+				index	= md.mesh.elements;
+				x		= md.mesh.x;
+				y		= md.mesh.y;
+			end
 		else
-			index					= md.mesh.elements;
-			x						= md.mesh.x;
-			y						= md.mesh.y;
+			data	= md.mask.groundedice_levelset;
+			index	= md.mesh.elements;
+			x		= md.mesh.x;
+			y		= md.mesh.y;
 		end
 		numberofelements	= size(index,1);
Index: /issm/trunk/src/m/contrib/tsantos/mismip/gl_position_static.m
===================================================================
--- /issm/trunk/src/m/contrib/tsantos/mismip/gl_position_static.m	(revision 24686)
+++ /issm/trunk/src/m/contrib/tsantos/mismip/gl_position_static.m	(revision 24686)
@@ -0,0 +1,200 @@
+function [glx gly] = gl_position(md,step,level), % NOTE(review): file is gl_position_static.m but the function is named gl_position -- MATLAB dispatches on the filename; confirm intended
+
+		%initialization of some variables
+		if isfield(md.results,'TransientSolution') & false % '& false' deliberately disables this branch: the "static" variant always reads md.mask/md.mesh below
+			data		= md.results.TransientSolution(step).MaskGroundediceLevelset;
+			if isfield(md.results.TransientSolution,'MeshElements')
+				index	= md.results.TransientSolution(step).MeshElements;
+				x		= md.results.TransientSolution(step).MeshX;
+				y		= md.results.TransientSolution(step).MeshY;
+			else
+				index	= md.mesh.elements;
+				x		= md.mesh.x;
+				y		= md.mesh.y;
+			end
+		else
+			data	= md.mask.groundedice_levelset;
+			index	= md.mesh.elements;
+			x		= md.mesh.x;
+			y		= md.mesh.y;
+		end
+		numberofelements	= size(index,1);
+		elementslist		= 1:numberofelements;
+		c						= [];
+		h						= [];
+
+		%get unique edges in mesh
+		%1: list of edges
+		edges=[index(:,[1,2]); index(:,[2,3]); index(:,[3,1])];
+		%2: find unique edges
+		[edges,I,J]=unique(sort(edges,2),'rows'); % J maps each original (sorted) edge to its unique-edge id
+		%3: unique edge numbers
+		vec=J;
+		%4: unique edges numbers in each triangle (2 triangles sharing the same edge will have
+		%   the same edge number)
+		edges_tria=[vec(elementslist), vec(elementslist+numberofelements), vec(elementslist+2*numberofelements)];
+
+		%segments [nodes1 nodes2]
+		Seg1=index(:,[1 2]);
+		Seg2=index(:,[2 3]);
+		Seg3=index(:,[3 1]);
+
+		%segment numbers [1;4;6;...]
+		Seg1_num=edges_tria(:,1);
+		Seg2_num=edges_tria(:,2);
+		Seg3_num=edges_tria(:,3);
+
+		%value of data on each tips of the segments
+		Data1=data(Seg1);
+		Data2=data(Seg2);
+		Data3=data(Seg3);
+
+		%get the ranges for each segment
+		Range1=sort(Data1,2);
+		Range2=sort(Data2,2);
+		Range3=sort(Data3,2);
+
+		%find the segments that contain this value
+		pos1=(Range1(:,1)<level & Range1(:,2)>level); % strict inequalities: segments with a tip exactly at 'level' are skipped
+		pos2=(Range2(:,1)<level & Range2(:,2)>level);
+		pos3=(Range3(:,1)<level & Range3(:,2)>level);
+
+		%get elements
+		poselem12=(pos1 & pos2);
+		poselem13=(pos1 & pos3);
+		poselem23=(pos2 & pos3);
+		poselem=find(poselem12 | poselem13 | poselem23);
+		numelems=length(poselem);
+
+		%if no element has been flagged, skip to the next level
+		if numelems==0,
+			return, % NOTE(review): returns with glx/gly never assigned -- caller will error if it uses the outputs
+		end
+
+		%go through the elements and build the coordinates for each segment (1 by element)
+		x1=zeros(numelems,1);
+		x2=zeros(numelems,1);
+		y1=zeros(numelems,1);
+		y2=zeros(numelems,1);
+		edge_l=zeros(numelems,2);
+
+		for j=1:numelems,
+
+			weight1=(level-Data1(poselem(j),1))/(Data1(poselem(j),2)-Data1(poselem(j),1)); % linear interpolation factor along segment 1
+			weight2=(level-Data2(poselem(j),1))/(Data2(poselem(j),2)-Data2(poselem(j),1));
+			weight3=(level-Data3(poselem(j),1))/(Data3(poselem(j),2)-Data3(poselem(j),1));
+
+			if poselem12(poselem(j));
+
+				x1(j)=x(Seg1(poselem(j),1))+weight1*(x(Seg1(poselem(j),2))-x(Seg1(poselem(j),1)));
+				x2(j)=x(Seg2(poselem(j),1))+weight2*(x(Seg2(poselem(j),2))-x(Seg2(poselem(j),1)));
+				y1(j)=y(Seg1(poselem(j),1))+weight1*(y(Seg1(poselem(j),2))-y(Seg1(poselem(j),1)));
+				y2(j)=y(Seg2(poselem(j),1))+weight2*(y(Seg2(poselem(j),2))-y(Seg2(poselem(j),1)));
+				edge_l(j,1)=Seg1_num(poselem(j));
+				edge_l(j,2)=Seg2_num(poselem(j));
+
+			elseif poselem13(poselem(j)),
+
+				x1(j)=x(Seg1(poselem(j),1))+weight1*(x(Seg1(poselem(j),2))-x(Seg1(poselem(j),1)));
+				x2(j)=x(Seg3(poselem(j),1))+weight3*(x(Seg3(poselem(j),2))-x(Seg3(poselem(j),1)));
+				y1(j)=y(Seg1(poselem(j),1))+weight1*(y(Seg1(poselem(j),2))-y(Seg1(poselem(j),1)));
+				y2(j)=y(Seg3(poselem(j),1))+weight3*(y(Seg3(poselem(j),2))-y(Seg3(poselem(j),1)));
+				edge_l(j,1)=Seg1_num(poselem(j));
+				edge_l(j,2)=Seg3_num(poselem(j));
+
+			elseif poselem23(poselem(j)),
+
+				x1(j)=x(Seg2(poselem(j),1))+weight2*(x(Seg2(poselem(j),2))-x(Seg2(poselem(j),1)));
+				x2(j)=x(Seg3(poselem(j),1))+weight3*(x(Seg3(poselem(j),2))-x(Seg3(poselem(j),1)));
+				y1(j)=y(Seg2(poselem(j),1))+weight2*(y(Seg2(poselem(j),2))-y(Seg2(poselem(j),1)));
+				y2(j)=y(Seg3(poselem(j),1))+weight3*(y(Seg3(poselem(j),2))-y(Seg3(poselem(j),1)));
+				edge_l(j,1)=Seg2_num(poselem(j));
+				edge_l(j,2)=Seg3_num(poselem(j));
+			else
+				%it should not go here
+			end
+		end
+
+		%now that we have the segments, we must try to connect them...
+
+		%loop over the subcontours
+		indice=0;
+		while ~isempty(edge_l),
+			indice=indice+1;
+
+			%take the right edge of the second segment and connect it to the next segments if any
+			e1=edge_l(1,1);   e2=edge_l(1,2);
+			xc=[x1(1);x2(1)]; yc=[y1(1);y2(1)];
+
+			%erase the lines corresponding to this edge
+			edge_l(1,:)=[];
+			x1(1)=[]; x2(1)=[];
+			y1(1)=[]; y2(1)=[];
+
+			[ro1,co1]=find(edge_l==e1);
+
+			while ~isempty(ro1)
+
+				if co1==1,
+					xc=[x2(ro1);xc]; yc=[y2(ro1);yc];
+
+					%next edge:
+					e1=edge_l(ro1,2);
+
+				else
+					xc=[x1(ro1);xc]; yc=[y1(ro1);yc];
+
+					%next edge:
+					e1=edge_l(ro1,1);
+				end
+
+				%erase the lines of this
+				edge_l(ro1,:)=[];
+				x1(ro1)=[]; x2(ro1)=[];
+				y1(ro1)=[]; y2(ro1)=[];
+
+				%next connection
+				[ro1,co1]=find(edge_l==e1);
+			end
+
+			%same thing the other way (to the right)
+			[ro2,co2]=find(edge_l==e2);
+
+			while ~isempty(ro2)
+
+				if co2==1,
+					xc=[xc;x2(ro2)]; yc=[yc;y2(ro2)];
+
+					%next edge:
+					e2=edge_l(ro2,2);
+				else
+					xc=[xc;x1(ro2)]; yc=[yc;y1(ro2)];
+
+					%next edge:
+					e2=edge_l(ro2,1);
+				end
+
+				%erase the lines of this
+				edge_l(ro2,:)=[];
+				x1(ro2)=[]; x2(ro2)=[];
+				y1(ro2)=[]; y2(ro2)=[];
+
+				%next connection
+				[ro2,co2]=find(edge_l==e2);
+			end
+
+			% Update the CS data structure as per "contours.m"
+			% so that clabel works
+			c = horzcat(c,[level, xc'; length(xc), yc']);
+	%		y0=find(c(2,:)==0);
+	%		y50=find(c(2,:)==50000);
+	%		gl0(glstep)=c(1,y0);
+	%		gl50(glstep)=c(1,y50);
+	glx=c(1,1:end)'; % NOTE(review): assigned inside the while-loop; includes the 'level'/count header columns of each subcontour, filtered below by glx~=0
+	gly=c(2,1:end)';
+	pos=find(glx~=0);
+	glx=glx(pos);
+	gly=gly(pos);
+
+end
+	%min(c(1,2:end))
Index: /issm/trunk/src/m/contrib/tsantos/mismip/writeNetCDF.m
===================================================================
--- /issm/trunk/src/m/contrib/tsantos/mismip/writeNetCDF.m	(revision 24685)
+++ /issm/trunk/src/m/contrib/tsantos/mismip/writeNetCDF.m	(revision 24686)
@@ -12,6 +12,11 @@
 
 	%Inserting time 0. md0 must be last experiment (e.g., Ice1r for Ice1ra)
-	x							= md0.results.TransientSolution(end).MeshX;
-	y							= md0.results.TransientSolution(end).MeshY;
+	if(isfield(md0.results.TransientSolution,'MeshElements'))
+      x     = md0.results.TransientSolution(end).MeshX;
+      y     = md0.results.TransientSolution(end).MeshY;
+   else
+      x     = md0.mesh.x;
+      y     = md0.mesh.y;
+   end	
 	time(1)					= 0;
 	[xgl_step ygl_step]	= gl_position(md0,length(md0.results.TransientSolution),0);
@@ -26,6 +31,11 @@
 
    for i=2:length(step),
-		x = md.results.TransientSolution(step(i)).MeshX;
-		y = md.results.TransientSolution(step(i)).MeshY;
+		if(isfield(md.results.TransientSolution,'MeshElements'))
+			x     = md.results.TransientSolution(step(i)).MeshX;
+			y     = md.results.TransientSolution(step(i)).MeshY;
+		else
+			x     = md.mesh.x;
+			y     = md.mesh.y;
+		end	
 		time(i)=md.results.TransientSolution(step(i)).time;	
 		[xgl_step ygl_step]=gl_position(md,step(i),0);
Index: /issm/trunk/src/m/contrib/tsantos/remesh.m
===================================================================
--- /issm/trunk/src/m/contrib/tsantos/remesh.m	(revision 24685)
+++ /issm/trunk/src/m/contrib/tsantos/remesh.m	(revision 24686)
@@ -1,4 +1,7 @@
-function mdOut = remesh(md,parfile)
+function mdOut = remesh(md,parfile,time_step)
 %Set the new mesh (refined) into the model md
+if nargin<3,
+	time_step=length(md.results.TransientSolution);
+end
 
 NewModel = model;
@@ -6,7 +9,7 @@
 % geometry
 NewModel.mesh				= mesh2d();
-NewModel.mesh.x			= md.results.TransientSolution(end).MeshX;
-NewModel.mesh.y			= md.results.TransientSolution(end).MeshY;
-NewModel.mesh.elements	= md.results.TransientSolution(end).MeshElements;
+NewModel.mesh.x			= md.results.TransientSolution(time_step).MeshX;
+NewModel.mesh.y			= md.results.TransientSolution(time_step).MeshY;
+NewModel.mesh.elements	= md.results.TransientSolution(time_step).MeshElements;
 
 % build segments. CONVEX HULL: IT JUST WORKS FOR REGULAR MESHES, WITHOUT "BAYS"
@@ -28,5 +31,5 @@
 
 	if segments(s,3)==0,
-		error('Element not found!');
+		%error('Element not found!');
 	end
 end
@@ -51,14 +54,14 @@
 
 % Setting initialization
-NewModel.initialization.vx				= md.results.TransientSolution(end).Vx;
-NewModel.initialization.vy				= md.results.TransientSolution(end).Vy;
+NewModel.initialization.vx				= md.results.TransientSolution(time_step).Vx;
+NewModel.initialization.vy				= md.results.TransientSolution(time_step).Vy;
 NewModel.initialization.vz				= zeros(md.mesh.numberofvertices,1);
-NewModel.initialization.vel			= md.results.TransientSolution(end).Vel;
-NewModel.initialization.pressure    = md.results.TransientSolution(end).Pressure;
-NewModel.geometry.surface				= md.results.TransientSolution(end).Surface;
-NewModel.geometry.base					= md.results.TransientSolution(end).Base;
-NewModel.geometry.bed					= md.results.TransientSolution(end).Bed;%md.geometry.bed; %use from parameterize
-NewModel.geometry.thickness			= md.results.TransientSolution(end).Thickness;
-NewModel.mask.groundedice_levelset  = md.results.TransientSolution(end).MaskGroundediceLevelset;
+NewModel.initialization.vel			= md.results.TransientSolution(time_step).Vel;
+NewModel.initialization.pressure    = md.results.TransientSolution(time_step).Pressure;
+NewModel.geometry.surface				= md.results.TransientSolution(time_step).Surface;
+NewModel.geometry.base					= md.results.TransientSolution(time_step).Base;
+NewModel.geometry.bed					= md.results.TransientSolution(time_step).Bed;%md.geometry.bed; %use from parameterize
+NewModel.geometry.thickness			= md.results.TransientSolution(time_step).Thickness;
+NewModel.mask.groundedice_levelset  = md.results.TransientSolution(time_step).MaskGroundediceLevelset;
     
 %copy other data
Index: /issm/trunk/src/m/extrusion/project2d.m
===================================================================
--- /issm/trunk/src/m/extrusion/project2d.m	(revision 24685)
+++ /issm/trunk/src/m/extrusion/project2d.m	(revision 24686)
@@ -27,9 +27,13 @@
 end
 
-if size(value,1)==md3d.mesh.numberofvertices,
+if numel(value)==1
+	projection_value=value;
+elseif size(value,1)==md3d.mesh.numberofvertices,
 	projection_value=value((layer-1)*md3d.mesh.numberofvertices2d+1:layer*md3d.mesh.numberofvertices2d,:);
-elseif size(value,1)==md3d.mesh.numberofvertices+1,
+elseif size(value,1)==md3d.mesh.numberofvertices+1
 	projection_value=[value((layer-1)*md3d.mesh.numberofvertices2d+1:layer*md3d.mesh.numberofvertices2d,:); value(end,:)];
+elseif size(value,1)==md3d.mesh.numberofelements
+	projection_value=value((layer-1)*md3d.mesh.numberofelements2d+1:layer*md3d.mesh.numberofelements2d,:);
 else
-	projection_value=value((layer-1)*md3d.mesh.numberofelements2d+1:layer*md3d.mesh.numberofelements2d,:);
+	error('Dimensions not supported yet');
 end
Index: /issm/trunk/src/m/extrusion/project2d.py
===================================================================
--- /issm/trunk/src/m/extrusion/project2d.py	(revision 24685)
+++ /issm/trunk/src/m/extrusion/project2d.py	(revision 24686)
@@ -40,5 +40,5 @@
         projection_value = value[(layer - 1) * md3d.mesh.numberofvertices2d:layer * md3d.mesh.numberofvertices2d]
     elif value.shape[0] == md3d.mesh.numberofvertices + 1:
-        projection_value = [value[(layer - 1) * md3d.mesh.numberofvertices2d:layer * md3d.mesh.numberofvertices2d], value[-1]]
+        projection_value = np.vstack((value[(layer - 1) * md3d.mesh.numberofvertices2d:layer * md3d.mesh.numberofvertices2d], value[-1]))
     else:
         projection_value = value[(layer - 1) * md3d.mesh.numberofelements2d:layer * md3d.mesh.numberofelements2d]
Index: /issm/trunk/src/m/io/loadvars.py
===================================================================
--- /issm/trunk/src/m/io/loadvars.py	(revision 24685)
+++ /issm/trunk/src/m/io/loadvars.py	(revision 24686)
@@ -127,8 +127,8 @@
                                 t = int(indexlist[i])
                                 if listtype == 'dict':
-                                    Tree[t][str(var)] = varval[0]
-
-                                else:
-                                    Tree[t].__dict__[str(var)] = varval[0]
+                                    Tree[t][str(var)] = varval[0].data
+
+                                else:
+                                    Tree[t].__dict__[str(var)] = varval[0].data
 
                             else:
@@ -144,5 +144,4 @@
                                 else:
                                     Tree.__dict__[str(var)] = varval[0].item()
-
                         elif vardim == 1:
                             if varval.dtype == str:
@@ -160,9 +159,8 @@
                                     t = int(indexlist[i])
                                     if listtype == 'dict':
-                                        Tree[t][str(var)] = varval[:]
+                                        Tree[t][str(var)] = varval[:].data
 
                                     else:
-                                        Tree[t].__dict__[str(var)] = varval[:]
-
+                                        Tree[t].__dict__[str(var)] = varval[:].data
                                 else:
                                     try:
@@ -189,7 +187,7 @@
                                     t = int(indexlist[i])
                                     if listtype == 'dict':
-                                        Tree[t][str(var)] = varval[:, :]
+                                        Tree[t][str(var)] = varval[:, :].data
                                     else:
-                                        Tree[t].__dict__[str(var)] = varval[:, :]
+                                        Tree[t].__dict__[str(var)] = varval[:, :].data
                                 else:
                                     Tree.__dict__[str(var)] = varval[:, :].data
@@ -198,5 +196,5 @@
                                 t = int(indexlist[i])
                                 if listtype == 'dict':
-                                    Tree[t][str(var)] = varval[:, :, :]
+                                    Tree[t][str(var)] = varval[:, :, :].data
                                 else:
                                     Tree[t].__dict__[str(var)] = varval[:, :, :]
Index: /issm/trunk/src/m/mesh/ExportGmsh.m
===================================================================
--- /issm/trunk/src/m/mesh/ExportGmsh.m	(revision 24685)
+++ /issm/trunk/src/m/mesh/ExportGmsh.m	(revision 24686)
@@ -8,5 +8,5 @@
 fid=fopen(filename,'w');
 
-%initialiaion
+%initialization
 fprintf(fid,'$MeshFormat \n');
 fprintf(fid,'2.2 0 8 \n');
Index: /issm/trunk/src/m/mesh/ExportXml.m
===================================================================
--- /issm/trunk/src/m/mesh/ExportXml.m	(revision 24686)
+++ /issm/trunk/src/m/mesh/ExportXml.m	(revision 24686)
@@ -0,0 +1,46 @@
+function ExportXml(md,filename)
+%EXPORTXML - export mesh to xml format (For FEniCS)
+%
+%   Usage:
+%      ExportXml(md,filename)
+
+t1=clock;fprintf('%s',['writing xml mesh file']);
+fid=fopen(filename,'w');
+
+%initialization (DOLFIN XML mesh header)
+fprintf(fid,'<?xml version="1.0" encoding="UTF-8"?>\n');
+fprintf(fid,'\n');
+fprintf(fid,'<dolfin xmlns:dolfin="http://www.fenicsproject.org">\n');
+if isa(md.mesh,'mesh2d')
+	fprintf(fid,'  <mesh celltype="triangle" dim="2">\n');
+elseif isa(md.mesh,'mesh3dtetras')
+	fprintf(fid,'  <mesh celltype="tetrahedron" dim="3">\n');
+else
+	error('only triangles and tets are supported');
+end
+
+%printing point positions (DOLFIN uses 0-based vertex indices, hence i-1)
+fprintf(fid,'    <vertices size="%i">\n',md.mesh.numberofvertices);
+for i=1:md.mesh.numberofvertices
+	fprintf(fid,'      <vertex index="%i" x="%17.15e" y="%17.15e" z="%17.15e"/>\n',i-1,md.mesh.x(i),md.mesh.y(i),md.mesh.z(i)); % NOTE(review): mesh2d may not carry a z field -- confirm the 2d branch does not error here
+end
+fprintf(fid,'    </vertices>\n');
+fprintf(fid,'    <cells size="%i">\n',md.mesh.numberofelements);
+if isa(md.mesh,'mesh2d')
+	for i=1:md.mesh.numberofelements
+		fprintf(fid,'      <triangle index="%i" v0="%i" v1="%i" v2="%i"/>\n',i-1,md.mesh.elements(i,1)-1,md.mesh.elements(i,2)-1,md.mesh.elements(i,3)-1);
+	end
+elseif isa(md.mesh,'mesh3dtetras')
+	for i=1:md.mesh.numberofelements
+		fprintf(fid,'      <tetrahedron index="%i" v0="%i" v1="%i" v2="%i" v3="%i"/>\n',i-1,md.mesh.elements(i,1)-1,md.mesh.elements(i,2)-1,md.mesh.elements(i,3)-1,md.mesh.elements(i,4)-1);
+	end
+else
+	error('only triangles and tets are supported');
+end
+fprintf(fid,'    </cells>\n');
+fprintf(fid,'  </mesh>\n');
+fprintf(fid,'</dolfin>\n');
+
+%close
+fclose(fid);
+t2=clock;fprintf('%s\n',[' done (' num2str(etime(t2,t1)) ' seconds)']);
Index: /issm/trunk/src/m/mesh/roundmesh.py
===================================================================
--- /issm/trunk/src/m/mesh/roundmesh.py	(revision 24685)
+++ /issm/trunk/src/m/mesh/roundmesh.py	(revision 24686)
@@ -17,11 +17,9 @@
           md = roundmesh(md, radius, resolution)
     """
+    # First we have to create the domain outline
+    # Get number of points on the circle
+    pointsonedge = int(np.floor((2. * np.pi * radius) / resolution) + 1)  # + 1 to close the outline
 
-    #First we have to create the domain outline
-
-    #Get number of points on the circle
-    pointsonedge = np.floor((2. * np.pi * radius) / resolution) + 1  # + 1 to close the outline
-
-    #Calculate the cartesians coordinates of the points
+    # Calculate the cartesians coordinates of the points
     theta = np.linspace(0., 2. * np.pi, pointsonedge)
     x_list = roundsigfig(radius * np.cos(theta), 12)
@@ -33,14 +31,14 @@
     expwrite(A, 'RoundDomainOutline.exp')
 
-    #Call Bamg
+    # Call Bamg
     md = triangle(md, 'RoundDomainOutline.exp', resolution)
-    #md = bamg(md, 'domain', 'RoundDomainOutline.exp', 'hmin', resolution)
+    # md = bamg(md, 'domain', 'RoundDomainOutline.exp', 'hmin', resolution)
 
-    #move the closest node to the center
+    # move the closest node to the center
     pos = np.argmin(md.mesh.x**2 + md.mesh.y**2)
     md.mesh.x[pos] = 0.
     md.mesh.y[pos] = 0.
 
-    #delete domain
+    # delete domain
     os.remove('RoundDomainOutline.exp')
 
Index: /issm/trunk/src/m/miscellaneous/normfit_issm.py
===================================================================
--- /issm/trunk/src/m/miscellaneous/normfit_issm.py	(revision 24685)
+++ /issm/trunk/src/m/miscellaneous/normfit_issm.py	(revision 24686)
@@ -5,5 +5,4 @@
 
 def normfit_issm(x, alpha=None):
-
     if alpha is None:
         alpha = 0.05
Index: /issm/trunk/src/m/miscellaneous/transientrestart.m
===================================================================
--- /issm/trunk/src/m/miscellaneous/transientrestart.m	(revision 24685)
+++ /issm/trunk/src/m/miscellaneous/transientrestart.m	(revision 24686)
@@ -40,4 +40,5 @@
 if isfield(results,'Waterfraction'),md.initialization.waterfraction=results.Waterfraction; end
 if isfield(results,'Watercolumn'), md.initialization.watercolumn=results.Watercolumn; end
+if isfield(results,'Enthalpy'),    md.initialization.enthalpy=results.Enthalpy; end
 
 %Deal with new geometry
Index: /issm/trunk/src/m/parameterization/reinitializelevelset.m
===================================================================
--- /issm/trunk/src/m/parameterization/reinitializelevelset.m	(revision 24685)
+++ /issm/trunk/src/m/parameterization/reinitializelevelset.m	(revision 24686)
@@ -5,4 +5,12 @@
 %      levelsetnew = reinitializelevelset(md,levelset)
 
+% if md is 3d, levelset should be projected on a 2d mesh 
+
+if isempty(levelset), error('levelset provided is empty'); end
+if dimension(md.mesh)==3,
+   if length(levelset)~=md.mesh.numberofvertices2d, error('levelset provided should be specified at the 2d vertices of the mesh'); end
+else
+   if length(levelset)~=md.mesh.numberofvertices, error('levelset provided should be specified at the vertices of the mesh'); end
+end
 
 %First: extract segments
@@ -10,7 +18,14 @@
 
 %Now, make this a distance field (might not be closed)
-levelsetnew=abs(ExpToLevelSet(md.mesh.x,md.mesh.y,contours));
+levelsetnew=abs(ExpToLevelSet(md.mesh.x,md.mesh.y,contours)); % levelsetnew comes on the 3d vertices, if mesh is 3d
 
 %Finally, change sign
-pos = find(levelset<0);
-levelsetnew(pos) = -levelsetnew(pos);
+pos=find(levelset<0); % if mesh is 3d, it refers to the vertices on the base
+if dimension(md.mesh)==3
+	for i=1:md.mesh.numberoflayers
+		pos3d=pos+(i-1)*md.mesh.numberofvertices2d;
+		levelsetnew(pos3d)=-levelsetnew(pos3d);
+	end	
+else
+	levelsetnew(pos)=-levelsetnew(pos);
+end
Index: /issm/trunk/src/m/plot/applyoptions.m
===================================================================
--- /issm/trunk/src/m/plot/applyoptions.m	(revision 24685)
+++ /issm/trunk/src/m/plot/applyoptions.m	(revision 24686)
@@ -285,4 +285,20 @@
 end
 
+%shpdisp3d
+if exist(options,'shpdisp3d'),
+	filename=(getfieldvalue(options,'shpdisp3d'));
+	style=(getfieldvalue(options,'shpstyle',{'r.-'}));
+	linewidth=(getfieldvalue(options,'linewidth',1));
+	for i=1:length(getfieldvalue(options,'shpdisp3d')),
+		filenamei=filename{i};
+		stylei=style{i};
+		if length(linewidth)==1,
+			linewidthi=linewidth;
+		else
+			linewidthi=linewidth{i};
+		end
+		shpdisp3d(filenamei,'figure',1,'style',stylei,'linewidth',linewidthi);
+	end
+end
 
 
Index: /issm/trunk/src/m/plot/applyoptions.py
===================================================================
--- /issm/trunk/src/m/plot/applyoptions.py	(revision 24685)
+++ /issm/trunk/src/m/plot/applyoptions.py	(revision 24686)
@@ -236,6 +236,4 @@
             colorbarfontsize = options.getfieldvalue('colorbarfontsize')
             cb.ax.tick_params(labelsize=colorbarfontsize)
-    # cb.set_ticks([0, -10])
-    # cb.set_ticklabels([-10, 0, 10])
         if options.exist('colorbarticks'):
             colorbarticks = options.getfieldvalue('colorbarticks')
Index: /issm/trunk/src/m/plot/landsatmap.m
===================================================================
--- /issm/trunk/src/m/plot/landsatmap.m	(revision 24686)
+++ /issm/trunk/src/m/plot/landsatmap.m	(revision 24686)
@@ -0,0 +1,137 @@
+% Explain
+%  upload landsatmap to md.radaroverlay
+%
+% Usage
+%  md = landsatmap(md);
+%  md = landsatmap(md,'highres',1);
+function md = landsatmap(md,varargin),
+
+% check input variables
+if nargin == 1 ,% {{{
+	options = pairoptions;
+else
+	options = pairoptions(varargin{:});
+end% }}}
+
+%process mesh and data
+[x y z elements is2d isplanet]=processmesh(md,[],options);
+
+%check is2d
+if ~is2d,
+   error('landsatmap error message: gridded not supported for 3d meshes, project on a layer');
+end
+
+% get xlim, and ylim
+xlim    = getfieldvalue(options,'xlim',[min(x) max(x)])/getfieldvalue(options,'unit',1);
+ylim    = getfieldvalue(options,'ylim',[min(y) max(y)])/getfieldvalue(options,'unit',1);
+highres = getfieldvalue(options,'highres',0);
+
+if md.mesh.epsg == 3031 % Antarctica region {{{
+	if highres, % high resolution geotiff file
+		if 1, disp('   LIMA with geotiff'), % {{{
+			disp('WARNING : this image should be collected with geocoded tif file');
+			% find merged mosaic landsat image {{{
+			limapath = {'/drive/project_inwoo/issm/Data/LIMA/AntarcticaLandsat.tif';
+				'/home/DATA/ICESHEET/LIMA/AntarcticaLandsat.tif'};
+			pos = zeros(length(limapath),1);
+			for ii = 1:length(limapath)
+				if exist(limapath{ii}), pos(ii) = 1; end
+			end
+			limapath = limapath{find(pos)};
+			fprintf('   LIMA path is %s\n', limapath);
+			% }}}
+
+			% read image
+			im = imread(limapath);
+
+			% Region of LIMA data set
+			info = gdalinfo(limapath); % get geotiff info
+			xm = info.xmin + info.dx*[0:info.nx-1];
+			ym = info.ymax - info.dy*[0:info.ny-1];
+
+			%disp('   find region of model at LIMA');
+			offset = 1e+4;
+			posx = find((xm > xlim(1)-offset).* (xm < xlim(2)+offset));
+			posy = find((ym > ylim(1)-offset).* (ym < ylim(2)+offset));
+		end % }}}
+	else
+		if 1, disp('   LIMA with jp2'), % {{{
+			% find merged mosaic landsat image {{{
+			limapath = {'/drive/project_inwoo/issm/Data/LIMA/jp2_100pct/00000-20080314-144756363.jp2';
+				'/data/project_inwoo/issm/Data/LIMA/jp2_100pct/00000-20080314-144756363.jp2';
+				'/home/DATA/ICESHEET/LIMA/jp2_100pct/00000-20080314-144756363.jp2'};
+			pos = zeros(length(limapath),1);
+			for ii = 1:length(limapath)
+				if exist(limapath{ii}), pos(ii) = 1; end
+			end
+			
+			if sum(pos) == 0,
+				fprintf('download website : https://lima.usgs.gov/fullcontinent.php\n');
+				error('Landsat image at Antarctic region should be downloaded at above website');
+			end
+			limapath = limapath{find(pos)};
+			fprintf('   LIMA path is %s\n', limapath);
+			% }}}
+
+			% Resolution and coordinates of upper left corner:
+			xres = 240.010503438;
+			yres = -240.000000516;
+			xul = -2668154.98388;
+			yul = 2362214.96998;
+
+			% Arrays of pixel coordinates:
+			xm = [xul:xres:2.813684914643920e+06];
+			ym = [yul:yres:-2.294505040031947e+06];
+
+			% reduction level r keeps every 2^r-th point (here rlevel = 2, i.e. every 4th point)
+			rlevel = 2;
+			xm = xm(1:2^rlevel:end);
+			ym = ym(1:2^rlevel:end);
+			im = imread(limapath,'reductionlevel',rlevel);
+
+			%disp('   find region of model at LIMA');
+			offset = 1e+4;
+			posx = find((xm > xlim(1)-offset).* (xm < xlim(2)+offset));
+			posy = find((ym > ylim(1)-offset).* (ym < ylim(2)+offset));
+		end % }}}
+		if 0, disp('   LIMA with reduced tiff'), % {{{
+			% find merged mosaic landsat image {{{
+			limapath = {'/drive/project_inwoo/issm/Data/LIMA/tiff_90pct/00000-20080319-092059124.tif'};
+			pos = zeros(length(limapath),1);
+			for ii = 1:length(limapath)
+				if exist(limapath{ii}), pos(ii) = 1; end
+			end
+			
+			if sum(pos) == 0,
+				fprintf('download website : https://lima.usgs.gov/fullcontinent.php\n');
+				error('Landsat image at Antarctic region should be downloaded at above website');
+			end
+			limapath = limapath{find(pos)};
+			fprintf('   LIMA path is %s\n', limapath);
+			% }}}
+
+			% read image
+			im = imread(limapath);
+
+			% Region of LIMA data set
+			info = gdalinfo(limapath); % get geotiff info
+			xm = info.xmin + info.dx*[0:info.nx-1];
+			ym = info.ymax - info.dy*[0:info.ny-1];
+
+			%disp('   find region of model at LIMA');
+			offset = 1e+4;
+			posx = find((xm > xlim(1)-offset).* (xm < xlim(2)+offset));
+			posy = find((ym > ylim(1)-offset).* (ym < ylim(2)+offset));
+		end % }}}
+	end
+
+	% update region of radaroverlay
+	md.radaroverlay.x = xm(posx);
+	md.radaroverlay.y = ym(posy);
+	md.radaroverlay.pwr = im(posy, posx,:);
+
+	% }}}
+else
+	error('Check md.mesh.epsg, available LIMA region is at Antarctica (EPSG:3031)');
+end
+
Index: /issm/trunk/src/m/plot/manualcb.m
===================================================================
--- /issm/trunk/src/m/plot/manualcb.m	(revision 24685)
+++ /issm/trunk/src/m/plot/manualcb.m	(revision 24686)
@@ -1,3 +1,3 @@
-function manualcb(zmin,zmax,cmap,varargin)
+function h=manualcb(zmin,zmax,cmap,varargin)
 %MANUALCB - custom colorbar
 %
@@ -12,4 +12,6 @@
 %      - 'orientation' : 'vertical' (default) or 'horizontal'
 %      - 'title'       : colorbar title
+%      - 'xlabel'      : colorbar x-label
+%      - 'ylabel'      : colorbar y-label
 %      - 'tick'        : specified values of tick labels
 %      - 'ticksep'     : spacing between ticks
@@ -133,4 +135,6 @@
 		th=title(getfieldvalue(options,'title'),'FontSize',fontsize,'Color',fontcolor);
 		set(th,'Position',[ytick(end)+0.075,-0.3]);
+	elseif getfieldvalue(options,'inverttickposition',0)==1,
+		text(1.9,.7,getfieldvalue(options,'ylabel'),'HorizontalAlignment','right','VerticalAlignment','middle','FontSize',fontsize,'Color',fontcolor,'rotation',90);
 	else
 		ylabel(getfieldvalue(options,'ylabel'),'FontSize',fontsize,'Color',fontcolor);
@@ -139,4 +143,5 @@
 	
 %Back to original axes
+h=gca;
 if getfieldvalue(options,'showregion',0)==0,
 	%Do it this way in order to preserve the figure visibility
Index: /issm/trunk/src/m/plot/plot_basaldrag.m
===================================================================
--- /issm/trunk/src/m/plot/plot_basaldrag.m	(revision 24685)
+++ /issm/trunk/src/m/plot/plot_basaldrag.m	(revision 24686)
@@ -20,15 +20,24 @@
 r=averaging(md,md.friction.q./md.friction.p,0);
 
-%compute horizontal velocity
+ub_mag=sqrt(md.initialization.vx.^2+md.initialization.vy.^2)/md.constants.yts;
+drag_mag=(max(md.constants.g*(md.materials.rho_ice*md.geometry.thickness+md.materials.rho_water*md.geometry.base),0)).^r.*(md.friction.coefficient).^2.*ub_mag.^s/1000;
+sig=1;
+
+%compute horizontal velocity 
 if strcmpi(type,'basal_drag')
 	ub=sqrt(md.initialization.vx.^2+md.initialization.vy.^2)/md.constants.yts;
+	title_str='Basal drag [kPa]';
 elseif strcmpi(type,'basal_dragx')
 	ub=md.initialization.vx/md.constants.yts;
+	sig=-1;
+	title_str='Basal drag - x direction [kPa]';
 elseif strcmpi(type,'basal_dragy')
 	ub=md.initialization.vy/md.constants.yts;
+	sig=-1;
+	title_str='Basal drag - y direction [kPa]';
 end
 
 %compute basal drag
-drag=(max(md.constants.g*(md.materials.rho_ice*md.geometry.thickness+md.materials.rho_water*md.geometry.base),0)).^r.*(md.friction.coefficient).^2.*ub.^s/1000;
+drag=sig*drag_mag.*ub./ub_mag;
 
 %Figure out if this is a Section plot
@@ -47,5 +56,5 @@
 
 	%apply options
-	options=addfielddefault(options,'title','Basal drag [kPa]');
+	options=addfielddefault(options,'title',title_str);	
 	options=addfielddefault(options,'view',2);
 	applyoptions(md,basal_drag,options);
Index: /issm/trunk/src/m/plot/plot_coastlines.m
===================================================================
--- /issm/trunk/src/m/plot/plot_coastlines.m	(revision 24685)
+++ /issm/trunk/src/m/plot/plot_coastlines.m	(revision 24686)
@@ -1,4 +1,9875 @@
 function plot_coastlines(mesh,varargin);
 
+%define coastline: {{{
+coast=[-83.83 -180 ;...
+-84.33 -178 ;...
+-84.5 -174 ;...
+-84.67 -170 ;...
+-84.92 -166 ;...
+-85.42 -163 ;...
+-85.42 -158 ;...
+-85.58 -152 ;...
+-85.33 -146 ;...
+-84.83 -147 ;...
+-84.5 -151 ;...
+-84 -153.5 ;...
+-83.5 -153 ;...
+-83 -154 ;...
+-82.5 -154 ;...
+-82 -154 ;...
+-81.5 -154.5 ;...
+-81.17 -153 ;...
+-81 -150 ;...
+-80.92 -146.5 ;...
+-80.67 -145.5 ;...
+-80.33 -148 ;...
+-80 -150 ;...
+-79.67 -152.5 ;...
+-79.25 -155 ;...
+-78.83 -157 ;...
+-78.7478 -157.255 ;...
+-78.6654 -157.507 ;...
+-78.5828 -157.755 ;...
+-78.5 -158 ;...
+-78.4806 -157.666 ;...
+-78.4608 -157.333 ;...
+-78.4406 -157.001 ;...
+-78.42 -156.67 ;...
+-78.5 -154.5 ;...
+-78.17 -154.5 ;...
+-78.17 -154.5 ;...
+-78.08 -156.67 ;...
+-77.83 -158 ;...
+-77.5 -158.33 ;...
+-77.17 -158.67 ;...
+-77 -157 ;...
+-77.17 -154 ;...
+-77.58 -153 ;...
+-77.83 -150.5 ;...
+-77.67 -148 ;...
+-77.25 -146 ;...
+-76.75 -146 ;...
+-76.33 -146.33 ;...
+-75.92 -146.67 ;...
+-75.83 -144 ;...
+-75.58 -142 ;...
+-75.75 -140.67 ;...
+-75.42 -140 ;...
+-75.25 -138.33 ;...
+-75 -136.67 ;...
+-74.75 -135.5 ;...
+-74.75 -133 ;...
+-75 -132 ;...
+-75.42 -130.83 ;...
+-75.75 -129.67 ;...
+-76 -128 ;...
+-76 -127 ;...
+-75.67 -126.83 ;...
+-75.17 -126.33 ;...
+-74.67 -125.5 ;...
+-74.33 -124.5 ;...
+-74.33 -124.5 ;...
+-73.83 -123.33 ;...
+-73.25 -123.67 ;...
+-73.25 -122.33 ;...
+-73.75 -121.33 ;...
+-74 -120 ;...
+-73.58 -120 ;...
+-73.6037 -119.584 ;...
+-73.6266 -119.167 ;...
+-73.6487 -118.749 ;...
+-73.67 -118.33 ;...
+-73.6712 -117.915 ;...
+-73.6716 -117.5 ;...
+-73.6712 -117.085 ;...
+-73.67 -116.67 ;...
+-73.7101 -116.586 ;...
+-73.7501 -116.501 ;...
+-73.7901 -116.416 ;...
+-73.83 -116.33 ;...
+-73.9158 -116.66 ;...
+-74.001 -116.993 ;...
+-74.0858 -117.33 ;...
+-74.17 -117.67 ;...
+-74.295 -117.67 ;...
+-74.42 -117.67 ;...
+-74.545 -117.67 ;...
+-74.67 -117.67 ;...
+-74.7957 -117.343 ;...
+-74.921 -117.011 ;...
+-75.0457 -116.673 ;...
+-75.17 -116.33 ;...
+-75.1929 -115.668 ;...
+-75.2138 -115.004 ;...
+-75.2329 -114.338 ;...
+-75.25 -113.67 ;...
+-75 -112.33 ;...
+-74.5 -112.5 ;...
+-74.375 -112.543 ;...
+-74.25 -112.586 ;...
+-74.125 -112.628 ;...
+-74 -112.67 ;...
+-73.9177 -112.5 ;...
+-73.8353 -112.332 ;...
+-73.7527 -112.165 ;...
+-73.67 -112 ;...
+-74 -111.5 ;...
+-74.5 -111 ;...
+-75 -110.33 ;...
+-75.17 -108.67 ;...
+-74.67 -108.5 ;...
+-74.5 -109.5 ;...
+-74 -109.5 ;...
+-74 -108.33 ;...
+-74.33 -108 ;...
+-74.67 -108.5 ;...
+-75.08 -107.5 ;...
+-75.08 -106 ;...
+-74.92 -104.5 ;...
+-74.92 -104.5 ;...
+-74.58 -104.5 ;...
+-74.58 -102.67 ;...
+-74.75 -101.33 ;...
+-75 -100 ;...
+-74.75 -99.5 ;...
+-74.33 -101.33 ;...
+-74 -100.67 ;...
+-73.75 -101.5 ;...
+-73.5 -101.5 ;...
+-73.58 -100 ;...
+-73.42 -99 ;...
+-73.08 -100 ;...
+-73.25 -101.33 ;...
+-72.75 -102.5 ;...
+-72.67 -101.33 ;...
+-73 -101.5 ;...
+-72.92 -99.67 ;...
+-73.33 -98.5 ;...
+-73.08 -98 ;...
+-73.42 -97.5 ;...
+-73.75 -97.5 ;...
+-73.75 -96.33 ;...
+-73.42 -96 ;...
+-73.08 -96 ;...
+-72.92 -97.5 ;...
+-72.5 -98.5 ;...
+-72.17 -99.67 ;...
+-72.1084 -100.006 ;...
+-72.0461 -100.339 ;...
+-71.9833 -100.671 ;...
+-71.92 -101 ;...
+-71.8782 -100.706 ;...
+-71.8359 -100.412 ;...
+-71.7932 -100.121 ;...
+-71.75 -99.83 ;...
+-72 -98.33 ;...
+-72 -98.33 ;...
+-71.83 -97 ;...
+-72.08 -95.67 ;...
+-72.42 -95.67 ;...
+-72.58 -94 ;...
+-72.75 -92.25 ;...
+-72.92 -90.5 ;...
+-72.92 -88.75 ;...
+-72.92 -87 ;...
+-73.08 -85.5 ;...
+-72.92 -84.33 ;...
+-72.83 -83 ;...
+-72.92 -81.25 ;...
+-73 -79.5 ;...
+-73.25 -78.5 ;...
+-72.83 -78.67 ;...
+-72.42 -78 ;...
+-72.58 -76.5 ;...
+-72.83 -75 ;...
+-73 -74 ;...
+-73.5 -74.5 ;...
+-73.42 -73 ;...
+-73.33 -71.5 ;...
+-73.17 -70 ;...
+-73.08 -68.5 ;...
+-72.83 -67.5 ;...
+-72.33 -67 ;...
+-71.83 -67 ;...
+-71.5 -67.5 ;...
+-70.92 -67.5 ;...
+-70.42 -68 ;...
+-70.42 -68 ;...
+-70 -68.5 ;...
+-69.42 -68.5 ;...
+-69.42 -67.33 ;...
+-69.08 -66.67 ;...
+-68.83 -67.33 ;...
+-68.5 -66.83 ;...
+-68.08 -66.83 ;...
+-67.75 -67.5 ;...
+-67.42 -67.5 ;...
+-67.75 -68.5 ;...
+-67.75 -69.25 ;...
+-67.33 -69.33 ;...
+-66.92 -68.67 ;...
+-66.67 -67.83 ;...
+-67.08 -67.67 ;...
+-67 -67 ;...
+-67.33 -66.33 ;...
+-66.83 -66.33 ;...
+-66.58 -65.5 ;...
+-66.25 -65.83 ;...
+-65.92 -65 ;...
+-65.5 -64 ;...
+-65.08 -64 ;...
+-65.17 -63 ;...
+-64.75 -62.67 ;...
+-64.67 -61.67 ;...
+-64.25 -61.5 ;...
+-64 -61 ;...
+-63.83 -59.67 ;...
+-63.58 -58.83 ;...
+-63.58 -58.83 ;...
+-63.33 -58 ;...
+-63.25 -57.17 ;...
+-63.58 -56.67 ;...
+-63.58 -57.33 ;...
+-63.643 -57.5386 ;...
+-63.7056 -57.7481 ;...
+-63.768 -57.9586 ;...
+-63.83 -58.17 ;...
+-63.8305 -57.94 ;...
+-63.8307 -57.71 ;...
+-63.8305 -57.48 ;...
+-63.83 -57.25 ;...
+-64.42 -57.33 ;...
+-64.33 -58.17 ;...
+-64 -58.67 ;...
+-64.5 -59 ;...
+-64.33 -59.67 ;...
+-64.67 -60.5 ;...
+-65 -61.33 ;...
+-65.42 -62 ;...
+-65.75 -62.17 ;...
+-66.08 -62.33 ;...
+-66.5 -62.67 ;...
+-66.25 -63.5 ;...
+-66.5 -64 ;...
+-66.92 -63.67 ;...
+-66.92 -64.67 ;...
+-67.33 -65.33 ;...
+-67.83 -65.5 ;...
+-67.8928 -65.3339 ;...
+-67.9553 -65.1668 ;...
+-68.0178 -64.9989 ;...
+-68.08 -64.83 ;...
+-68.1426 -64.954 ;...
+-68.2052 -65.0786 ;...
+-68.2676 -65.204 ;...
+-68.33 -65.33 ;...
+-68.4351 -65.2067 ;...
+-68.5402 -65.0823 ;...
+-68.6451 -64.9568 ;...
+-68.75 -64.83 ;...
+-68.75 -63.67 ;...
+-69.08 -62.67 ;...
+-69.67 -62.33 ;...
+-70.17 -62 ;...
+-70.17 -62 ;...
+-70.58 -61.5 ;...
+-70.92 -61.67 ;...
+-71.17 -61 ;...
+-71.58 -60.83 ;...
+-72 -61.67 ;...
+-72.33 -60.83 ;...
+-72.67 -61 ;...
+-72.7531 -60.7116 ;...
+-72.8358 -60.4205 ;...
+-72.9181 -60.1266 ;...
+-73 -59.83 ;...
+-73.0826 -59.9138 ;...
+-73.1651 -59.9984 ;...
+-73.2476 -60.0838 ;...
+-73.33 -60.17 ;...
+-73.42 -61.5 ;...
+-73.75 -61 ;...
+-74.33 -60.67 ;...
+-74.67 -61.67 ;...
+-75.08 -61.67 ;...
+-75.17 -63 ;...
+-75.67 -63 ;...
+-76.08 -63.67 ;...
+-76.5 -65.5 ;...
+-77 -67.33 ;...
+-77.17 -65 ;...
+-77.08 -62.5 ;...
+-77.08 -60 ;...
+-77.25 -57.67 ;...
+-77.58 -56 ;...
+-77.92 -53.67 ;...
+-78.25 -51.67 ;...
+-78.5 -49.33 ;...
+-78.75 -47.67 ;...
+-78.83 -44 ;...
+-78.83 -41 ;...
+-78.83 -41 ;...
+-78.75 -38.5 ;...
+-78.33 -36.33 ;...
+-77.83 -35 ;...
+-77.5 -33.33 ;...
+-77.17 -31.67 ;...
+-76.83 -30 ;...
+-76.5 -28.33 ;...
+-76.08 -27 ;...
+-75.67 -26 ;...
+-75.33 -24 ;...
+-74.75 -23.67 ;...
+-74.42 -22.33 ;...
+-74.08 -20.67 ;...
+-73.75 -19.33 ;...
+-73.42 -17.67 ;...
+-73.08 -16.33 ;...
+-72.92 -14.67 ;...
+-72.83 -13.5 ;...
+-72.58 -12.33 ;...
+-72.25 -11.33 ;...
+-71.92 -11.17 ;...
+-71.67 -12 ;...
+-71.33 -12.17 ;...
+-71.25 -11.5 ;...
+-71.58 -11 ;...
+-71.25 -10.5 ;...
+-71 -9.75 ;...
+-71.17 -9 ;...
+-71.42 -8.67 ;...
+-71.83 -8.5 ;...
+-71.83 -8.5 ;...
+-71.7907 -8.20564 ;...
+-71.7509 -7.91252 ;...
+-71.7107 -7.62064 ;...
+-71.67 -7.33 ;...
+-71.6076 -7.41584 ;...
+-71.5451 -7.50111 ;...
+-71.4826 -7.58583 ;...
+-71.42 -7.67 ;...
+-71.3351 -7.58388 ;...
+-71.2501 -7.49851 ;...
+-71.1651 -7.41389 ;...
+-71.08 -7.33 ;...
+-71.0176 -7.41581 ;...
+-70.9551 -7.50107 ;...
+-70.8926 -7.5858 ;...
+-70.83 -7.67 ;...
+-70.8104 -7.45937 ;...
+-70.7905 -7.24916 ;...
+-70.7704 -7.03937 ;...
+-70.75 -6.83 ;...
+-70.75 -5.83 ;...
+-71 -5.83 ;...
+-71.33 -6 ;...
+-71.33 -4.5 ;...
+-71.25 -3 ;...
+-70.67 -3 ;...
+-70.42 -3.33 ;...
+-70.25 -2.5 ;...
+-70.42 -1.67 ;...
+-70.67 -1 ;...
+-71 -1.5 ;...
+-71.25 -0.67 ;...
+-71.5 0 ;...
+-71.25 1.33 ;...
+-70.92 2.67 ;...
+-70.58 3.5 ;...
+-70.33 4.67 ;...
+-70.33 6.33 ;...
+-70.08 7.33 ;...
+-70.08 8.67 ;...
+-70.17 10 ;...
+-70.08 11.33 ;...
+-70.08 12.67 ;...
+-70.17 14.33 ;...
+-70.17 15.67 ;...
+-70.17 15.67 ;...
+-70.17 17 ;...
+-70.17 18.33 ;...
+-70.33 20 ;...
+-70.42 21.5 ;...
+-70.58 23 ;...
+-70.42 24.67 ;...
+-70.17 25.33 ;...
+-70.17 27 ;...
+-69.92 28.33 ;...
+-69.83 30 ;...
+-69.58 31.33 ;...
+-69.5 32 ;...
+-69.5 33.5 ;...
+-69 33.33 ;...
+-68.67 33.5 ;...
+-68.58 34.33 ;...
+-68.92 35 ;...
+-69.33 36 ;...
+-69.5 37 ;...
+-69.58 38.33 ;...
+-70.08 38.67 ;...
+-69.67 39.33 ;...
+-69.5 39.83 ;...
+-69.17 39.83 ;...
+-68.83 40 ;...
+-68.5 41.33 ;...
+-68.17 42 ;...
+-67.83 43.33 ;...
+-67.75 44.5 ;...
+-67.75 46 ;...
+-67.75 46 ;...
+-67.58 47 ;...
+-67.67 48.17 ;...
+-67.42 48.5 ;...
+-67.42 49.33 ;...
+-67.83 49.83 ;...
+-67.58 50.5 ;...
+-67 50.17 ;...
+-66.5 50.5 ;...
+-66.25 51.5 ;...
+-65.92 52.17 ;...
+-65.83 53.67 ;...
+-65.92 55.17 ;...
+-66.25 56 ;...
+-66.42 57.17 ;...
+-66.67 57.17 ;...
+-66.75 56.33 ;...
+-66.92 57 ;...
+-67 58 ;...
+-67.17 59 ;...
+-67.5 59.33 ;...
+-67.33 60.67 ;...
+-67.58 61.67 ;...
+-67.67 62.67 ;...
+-67.5 63.67 ;...
+-67.58 65 ;...
+-67.75 66.33 ;...
+-67.83 67.67 ;...
+-67.83 68.67 ;...
+-67.67 69.5 ;...
+-68.17 69.5 ;...
+-68.17 69.5 ;...
+-68.58 70 ;...
+-69 70.5 ;...
+-69.17 69.5 ;...
+-69.58 69.83 ;...
+-70.08 69.83 ;...
+-70.5 70.67 ;...
+-70.42 71.67 ;...
+-70.08 71 ;...
+-69.75 72.33 ;...
+-69.58 73.5 ;...
+-69.67 74.5 ;...
+-69.83 75.5 ;...
+-69.5 76.17 ;...
+-69.17 77.17 ;...
+-69.08 78 ;...
+-68.5 78.33 ;...
+-68.25 79 ;...
+-68 80.5 ;...
+-67.83 81.67 ;...
+-67.33 82.5 ;...
+-67.17 83.67 ;...
+-67 85 ;...
+-66.83 86.33 ;...
+-66.67 87.67 ;...
+-66.75 88.5 ;...
+-66.75 89.67 ;...
+-66.67 91 ;...
+-66.5 92 ;...
+-66.58 93 ;...
+-66.58 94 ;...
+-66.58 94 ;...
+-66.5 95 ;...
+-66.67 96 ;...
+-66.5 97 ;...
+-66.67 97.67 ;...
+-66.5 98.5 ;...
+-66.58 99.33 ;...
+-66.42 100 ;...
+-66.42 100.67 ;...
+-66.08 101.5 ;...
+-65.83 102.67 ;...
+-65.92 104 ;...
+-66.08 105 ;...
+-66.25 106 ;...
+-66.5 107 ;...
+-66.58 108 ;...
+-66.83 109 ;...
+-66.58 109.5 ;...
+-66.67 110.33 ;...
+-66.42 110.67 ;...
+-66.08 110.67 ;...
+-65.92 111.67 ;...
+-65.83 112.67 ;...
+-65.75 113.67 ;...
+-66 114.5 ;...
+-66.33 115.5 ;...
+-66.5 116.5 ;...
+-66.75 117.67 ;...
+-66.83 119 ;...
+-66.83 120.33 ;...
+-66.67 121.5 ;...
+-66.67 121.5 ;...
+-66.42 122.5 ;...
+-66.75 123.33 ;...
+-66.75 124.33 ;...
+-66.5 124.83 ;...
+-66.58 125.83 ;...
+-66.25 126.5 ;...
+-66.25 127 ;...
+-66.5 127.67 ;...
+-66.92 128.33 ;...
+-67 129 ;...
+-67.17 129.5 ;...
+-66.92 130 ;...
+-66.33 130 ;...
+-66.08 130.83 ;...
+-66.17 132 ;...
+-66.08 133.33 ;...
+-66.17 134.33 ;...
+-66.08 135.33 ;...
+-66.33 136.33 ;...
+-66.33 137.33 ;...
+-66.5 138.33 ;...
+-66.58 139.33 ;...
+-66.67 140.5 ;...
+-66.75 142 ;...
+-67 142.5 ;...
+-66.83 143.33 ;...
+-67 144.33 ;...
+-67.5 144.33 ;...
+-67.58 145 ;...
+-67.5 146 ;...
+-67.5 146 ;...
+-67.83 146.83 ;...
+-68.25 147 ;...
+-68.42 148.33 ;...
+-68.25 149 ;...
+-68.5 150.33 ;...
+-68.33 151 ;...
+-68.67 151.5 ;...
+-68.58 152.67 ;...
+-68.58 154.33 ;...
+-69.08 155 ;...
+-69.33 156 ;...
+-69 156.5 ;...
+-69.08 157.5 ;...
+-69.25 158.67 ;...
+-69.5 160 ;...
+-69.67 161 ;...
+-70.08 161 ;...
+-70.5 161.17 ;...
+-70.83 161.67 ;...
+-70.58 162.17 ;...
+-70.17 162.33 ;...
+-70.08 163.33 ;...
+-70.58 163.17 ;...
+-70.58 164.67 ;...
+-70.5 166 ;...
+-70.83 167.33 ;...
+-71.17 168.67 ;...
+-71.5 170 ;...
+-71.17 170.17 ;...
+-71.75 171 ;...
+-71.75 171 ;...
+-72 170.17 ;...
+-72.5 170.5 ;...
+-72.83 169.67 ;...
+-73.17 168.5 ;...
+-73.25 167 ;...
+-73.67 167 ;...
+-74.08 166.67 ;...
+-74.17 165.33 ;...
+-74.5 165.33 ;...
+-74.83 164.33 ;...
+-75.17 163 ;...
+-75.67 162.67 ;...
+-76.08 162.67 ;...
+-76.5 163 ;...
+-76.92 163.33 ;...
+-77.33 163.67 ;...
+-77.75 163.67 ;...
+-78 164.67 ;...
+-78.1056 164.994 ;...
+-78.2108 165.323 ;...
+-78.3156 165.659 ;...
+-78.42 166 ;...
+-78.4413 165.503 ;...
+-78.4617 165.003 ;...
+-78.4813 164.503 ;...
+-78.5 164 ;...
+-78.75 161.5 ;...
+-79.17 160 ;...
+-79.67 160 ;...
+-80 160 ;...
+-80.5 160.33 ;...
+-81 160.33 ;...
+-81.5 161 ;...
+-81.92 162.33 ;...
+-82.42 164 ;...
+-82.83 166 ;...
+-82.83 166 ;...
+-83.25 168.5 ;...
+-83.5 171.5 ;...
+-83.5 176 ;...
+-83.83 180 ;...
+NaN NaN ;...
+-77.58 166.33 ;...
+-77.08 166.67 ;...
+-77.33 168.17 ;...
+-77.42 169.67 ;...
+-77.67 168.17 ;...
+-77.58 166.33 ;...
+NaN NaN ;...
+-78.75 -164 ;...
+-78.75 -161.67 ;...
+-79.08 -160.67 ;...
+-79.5 -160 ;...
+-79.92 -160 ;...
+-80.25 -161.67 ;...
+-79.83 -163.67 ;...
+-79.33 -163.67 ;...
+-79.08 -163 ;...
+-78.75 -164 ;...
+NaN NaN ;...
+-76.58 -150.17 ;...
+-76.17 -148.17 ;...
+-76.2329 -147.923 ;...
+-76.2955 -147.674 ;...
+-76.3579 -147.423 ;...
+-76.42 -147.17 ;...
+-76.5031 -147.497 ;...
+-76.5859 -147.827 ;...
+-76.6682 -148.161 ;...
+-76.75 -148.5 ;...
+-76.58 -150.17 ;...
+NaN NaN ;...
+-71.08 -76.17 ;...
+-71.08 -74.67 ;...
+-71 -73 ;...
+-70.92 -71.83 ;...
+-70.83 -71 ;...
+-70.58 -70.67 ;...
+-70.08 -71 ;...
+-69.58 -71.5 ;...
+-69.08 -71.67 ;...
+-68.92 -71.17 ;...
+-68.83 -70.33 ;...
+-69.42 -69.67 ;...
+-70 -69.33 ;...
+-70.5 -68.83 ;...
+-71 -68.5 ;...
+-71.5 -68.33 ;...
+-71.92 -68.5 ;...
+-72.33 -68.83 ;...
+-72.58 -70 ;...
+-72.67 -71.5 ;...
+-72.58 -73 ;...
+-72.25 -73.33 ;...
+-72.17 -74.33 ;...
+-71.83 -75 ;...
+-71.67 -76 ;...
+-71.33 -76.5 ;...
+-71.08 -76.17 ;...
+NaN NaN ;...
+-70.58 -76.17 ;...
+-70.25 -75.67 ;...
+-70 -75.75 ;...
+-69.75 -74.5 ;...
+-69.92 -73.67 ;...
+-70.25 -73.75 ;...
+-70.5 -74 ;...
+-70.58 -75 ;...
+-70.58 -76.17 ;...
+NaN NaN ;...
+-64.799 -63.6633 ;...
+-64.7634 -64.0273 ;...
+-64.6919 -64.1488 ;...
+-64.5633 -64.0044 ;...
+-64.4418 -63.6482 ;...
+-64.3347 -63.2838 ;...
+-64.5629 -63.0404 ;...
+-64.756 -63.4542 ;...
+-64.799 -63.6633 ;...
+NaN NaN ;...
+-64.5414 -62.5914 ;...
+-64.256 -62.4589 ;...
+-64.0994 -62.4781 ;...
+-64.0851 -62.2525 ;...
+-64.2488 -62.1151 ;...
+-64.5414 -62.5914 ;...
+NaN NaN ;...
+-63.25 -56.5 ;...
+-63 -55.83 ;...
+-63.1055 -55.6248 ;...
+-63.2106 -55.418 ;...
+-63.3155 -55.2098 ;...
+-63.42 -55 ;...
+-63.4407 -55.2495 ;...
+-63.4609 -55.4993 ;...
+-63.4807 -55.7495 ;...
+-63.5 -56 ;...
+-63.25 -56.5 ;...
+NaN NaN ;...
+-62.232 -58.7408 ;...
+-62.1268 -58.7522 ;...
+-62.0075 -58.5804 ;...
+-61.958 -57.9445 ;...
+-61.9859 -57.8248 ;...
+-62.0771 -57.8208 ;...
+-62.1615 -58.0476 ;...
+-62.232 -58.7408 ;...
+NaN NaN ;...
+-62.7883 -60.2334 ;...
+-62.6897 -60.3025 ;...
+-62.6688 -60.7807 ;...
+-62.5069 -60.7731 ;...
+-62.4575 -60.3266 ;...
+-62.4785 -60.0343 ;...
+-62.5699 -59.9605 ;...
+-62.6755 -59.9509 ;...
+-62.7883 -60.2334 ;...
+NaN NaN ;...
+-60.63 -46.33 ;...
+-60.33 -46 ;...
+-60.4232 -45.7521 ;...
+-60.5159 -45.5029 ;...
+-60.6082 -45.2521 ;...
+-60.7 -45 ;...
+-60.6837 -45.333 ;...
+-60.6666 -45.6657 ;...
+-60.6487 -45.9981 ;...
+-60.63 -46.33 ;...
+NaN NaN ;...
+0 -80.08 ;...
+0.67 -80 ;...
+1 -79.33 ;...
+1.25 -78.83 ;...
+1.67 -79 ;...
+1.75 -78.58 ;...
+2.17 -78.67 ;...
+2.67 -78.33 ;...
+2.67 -77.75 ;...
+3.25 -77.5 ;...
+3.67 -77.17 ;...
+4 -77.5 ;...
+4.5 -77.33 ;...
+5 -77.42 ;...
+5.5 -77.42 ;...
+6.17 -77.5 ;...
+6.58 -77.42 ;...
+7 -77.75 ;...
+7.42 -78.17 ;...
+8 -78.42 ;...
+8.33 -78.17 ;...
+8.33 -78.5 ;...
+8.75 -78.75 ;...
+9 -79.17 ;...
+9 -79.5 ;...
+8.58 -79.75 ;...
+8.33 -80.08 ;...
+8.33 -80.08 ;...
+8.17 -80.5 ;...
+7.83 -80.42 ;...
+7.74754 -80.3149 ;...
+7.66505 -80.2099 ;...
+7.58254 -80.1049 ;...
+7.5 -80 ;...
+7.43755 -80.1251 ;...
+7.37507 -80.2501 ;...
+7.31255 -80.3751 ;...
+7.25 -80.5 ;...
+7.17 -80.92 ;...
+7.75 -81 ;...
+7.75 -81.5 ;...
+8.08 -81.75 ;...
+8.25 -82.25 ;...
+8.33 -82.75 ;...
+8.17 -83 ;...
+8.42 -83.25 ;...
+8.5 -83.75 ;...
+9 -83.67 ;...
+9.33 -84 ;...
+9.67 -84.58 ;...
+10 -84.83 ;...
+9.67 -85.17 ;...
+9.83 -85.33 ;...
+9.92 -85.67 ;...
+10.33 -85.83 ;...
+10.75 -85.75 ;...
+11.17 -85.75 ;...
+11.5 -86.17 ;...
+11.83 -86.58 ;...
+12.25 -87 ;...
+12.58 -87.33 ;...
+12.685 -87.4149 ;...
+12.7901 -87.4999 ;...
+12.895 -87.5849 ;...
+13 -87.67 ;...
+13 -87.585 ;...
+13.0001 -87.5 ;...
+13 -87.415 ;...
+13 -87.33 ;...
+13.105 -87.4149 ;...
+13.2101 -87.4999 ;...
+13.315 -87.5849 ;...
+13.42 -87.67 ;...
+13.42 -87.67 ;...
+13.17 -87.83 ;...
+13.17 -88.42 ;...
+13.17 -88.83 ;...
+13.42 -89.33 ;...
+13.42 -89.75 ;...
+13.67 -90 ;...
+13.92 -90.5 ;...
+13.92 -91 ;...
+14.08 -91.5 ;...
+14.33 -92 ;...
+14.67 -92.33 ;...
+15 -92.75 ;...
+15.42 -93.17 ;...
+15.67 -93.42 ;...
+15.92 -93.83 ;...
+16.08 -94.25 ;...
+16.17 -94.67 ;...
+16.17 -95.17 ;...
+15.92 -95.67 ;...
+15.67 -96.25 ;...
+15.83 -96.83 ;...
+16 -97.42 ;...
+16.08 -98 ;...
+16.33 -98.5 ;...
+16.58 -99 ;...
+16.67 -99.5 ;...
+16.92 -100 ;...
+17.08 -100.5 ;...
+17.25 -101 ;...
+17.67 -101.58 ;...
+17.67 -101.58 ;...
+18 -102 ;...
+18 -102.25 ;...
+18.08 -102.83 ;...
+18.33 -103.5 ;...
+18.75 -103.83 ;...
+19.08 -104.33 ;...
+19.33 -104.92 ;...
+19.83 -105.33 ;...
+20.33 -105.67 ;...
+20.58 -105.33 ;...
+20.83 -105.5 ;...
+21.17 -105.17 ;...
+21.5 -105.17 ;...
+21.75 -105.5 ;...
+22.42 -105.67 ;...
+22.75 -106 ;...
+23.17 -106.42 ;...
+23.5 -106.75 ;...
+24 -107.17 ;...
+24.33 -107.58 ;...
+24.67 -108 ;...
+25.25 -108.33 ;...
+25.5 -109 ;...
+25.75 -109.42 ;...
+26 -109.42 ;...
+26.33 -109.25 ;...
+26.67 -109.5 ;...
+26.67 -109.75 ;...
+27 -110 ;...
+27.42 -110.58 ;...
+27.42 -110.58 ;...
+27.92 -110.58 ;...
+28 -111.17 ;...
+28.5 -111.67 ;...
+29 -112.17 ;...
+29.5 -112.42 ;...
+29.92 -112.75 ;...
+30.25 -112.83 ;...
+30.75 -113.08 ;...
+31.17 -113.08 ;...
+31.33 -113.5 ;...
+31.58 -113.83 ;...
+31.5 -114.17 ;...
+31.75 -114.75 ;...
+31.42 -114.83 ;...
+31 -114.75 ;...
+30.58 -114.58 ;...
+30.17 -114.67 ;...
+29.83 -114.42 ;...
+29.58 -114 ;...
+29.17 -113.67 ;...
+28.75 -113.25 ;...
+28.33 -112.92 ;...
+27.83 -112.75 ;...
+27.5 -112.33 ;...
+27 -112 ;...
+26.5 -111.5 ;...
+26 -111.33 ;...
+25.5 -111 ;...
+25.17 -111 ;...
+24.83 -110.67 ;...
+24.83 -110.67 ;...
+24.67 -110.83 ;...
+24.25 -110.58 ;...
+24.25 -110.17 ;...
+23.75 -109.75 ;...
+23.33 -109.42 ;...
+22.83 -110 ;...
+23.5 -110.33 ;...
+23.83 -110.83 ;...
+24.25 -111.25 ;...
+24.58 -111.67 ;...
+24.83 -112.33 ;...
+25.17 -112.08 ;...
+25.67 -112.08 ;...
+26.17 -112.33 ;...
+26.42 -112.83 ;...
+26.75 -113.17 ;...
+26.75 -113.58 ;...
+27 -114 ;...
+27.17 -114.33 ;...
+27.5 -114.5 ;...
+27.83 -115 ;...
+27.75 -114.42 ;...
+28 -114 ;...
+28.42 -114 ;...
+28.75 -114.33 ;...
+29.08 -114.67 ;...
+29.5 -115.17 ;...
+29.67 -115.67 ;...
+30.25 -115.75 ;...
+30.33 -116 ;...
+30.33 -116 ;...
+30.75 -116 ;...
+31 -116.33 ;...
+31.25 -116.33 ;...
+31.58 -116.58 ;...
+32 -116.75 ;...
+32.5 -117.08 ;...
+33 -117.25 ;...
+33.42 -117.5 ;...
+33.75 -118 ;...
+33.75 -118.33 ;...
+34.08 -118.42 ;...
+34.08 -118.75 ;...
+34.25 -119.25 ;...
+34.42 -119.83 ;...
+34.58 -120.58 ;...
+35.08 -120.58 ;...
+35.58 -121.08 ;...
+36 -121.42 ;...
+36.33 -121.92 ;...
+36.58 -121.83 ;...
+37 -121.83 ;...
+37 -122.17 ;...
+37.33 -122.42 ;...
+37.75 -122.5 ;...
+37.5 -122.08 ;...
+37.92 -122.33 ;...
+38.08 -122.17 ;...
+38.17 -122.5 ;...
+37.92 -122.5 ;...
+38 -123 ;...
+38 -123 ;...
+38.33 -122.92 ;...
+38.58 -123.33 ;...
+38.92 -123.67 ;...
+39.33 -123.75 ;...
+39.67 -123.75 ;...
+40 -124 ;...
+40.25 -124.33 ;...
+40.5 -124.33 ;...
+41 -124.08 ;...
+41.5 -124 ;...
+42 -124.17 ;...
+42.33 -124.42 ;...
+42.83 -124.5 ;...
+43.42 -124.25 ;...
+44 -124.08 ;...
+44.5 -124 ;...
+45 -124 ;...
+45.42 -123.83 ;...
+45.83 -123.92 ;...
+46.17 -123.92 ;...
+46.17 -123.17 ;...
+46.33 -123.92 ;...
+46.67 -124 ;...
+47 -124.08 ;...
+47.5 -124.33 ;...
+47.92 -124.58 ;...
+48.17 -124.75 ;...
+48.42 -124.67 ;...
+48.17 -123.83 ;...
+48.17 -123.17 ;...
+48.17 -123.17 ;...
+47.92 -122.58 ;...
+47.5 -123 ;...
+47.67 -122.58 ;...
+47.5 -122.5 ;...
+48 -122.25 ;...
+48.1051 -122.332 ;...
+48.2101 -122.414 ;...
+48.3151 -122.497 ;...
+48.42 -122.58 ;...
+48.46 -122.54 ;...
+48.5 -122.5 ;...
+48.54 -122.46 ;...
+48.58 -122.42 ;...
+48.6851 -122.502 ;...
+48.7901 -122.584 ;...
+48.8951 -122.667 ;...
+49 -122.75 ;...
+49.25 -123.17 ;...
+49.75 -123.17 ;...
+49.5 -123.75 ;...
+49.75 -124.33 ;...
+50.08 -124.92 ;...
+50.42 -125.5 ;...
+50.67 -126.25 ;...
+50.83 -127 ;...
+51.17 -127.75 ;...
+51.5 -127.75 ;...
+51.83 -128.17 ;...
+52.33 -128.42 ;...
+52.58 -129.17 ;...
+53.17 -129.75 ;...
+53.42 -130.33 ;...
+53.92 -130.75 ;...
+54.17 -130.17 ;...
+54.42 -130.42 ;...
+54.92 -130.08 ;...
+54.75 -130.75 ;...
+55 -131.33 ;...
+55.33 -131.75 ;...
+55.33 -131.75 ;...
+55.75 -132.08 ;...
+56.08 -132 ;...
+56.08 -132.5 ;...
+56.25 -133 ;...
+55.83 -132.58 ;...
+55.25 -132.08 ;...
+54.67 -132.08 ;...
+55.08 -132.5 ;...
+55.33 -133.08 ;...
+55.58 -133.42 ;...
+55.92 -133.67 ;...
+56.33 -133.67 ;...
+56.25 -134.17 ;...
+56.75 -134.33 ;...
+57.08 -133.83 ;...
+56.92 -133 ;...
+57.25 -133.42 ;...
+57.58 -133.75 ;...
+57.25 -134 ;...
+57 -134.5 ;...
+57.42 -134.5 ;...
+57.92 -134.75 ;...
+58.25 -134.67 ;...
+58.67 -135 ;...
+58.25 -135.17 ;...
+58.25 -135.67 ;...
+57.92 -135 ;...
+57.42 -135 ;...
+56.83 -134.75 ;...
+56.17 -134.75 ;...
+56.17 -134.75 ;...
+56.75 -135.17 ;...
+57.25 -135.5 ;...
+57.67 -136.17 ;...
+58.17 -136.5 ;...
+58.42 -137.25 ;...
+58.75 -137.92 ;...
+59.17 -138.67 ;...
+59.42 -139.5 ;...
+59.92 -139.67 ;...
+59.67 -140.25 ;...
+59.75 -141.08 ;...
+60 -142 ;...
+60 -143 ;...
+60 -144 ;...
+60.25 -145 ;...
+60.58 -146 ;...
+60.83 -146.67 ;...
+60.83 -147.5 ;...
+61 -148.33 ;...
+60.42 -148.17 ;...
+59.92 -148.67 ;...
+59.92 -149.5 ;...
+59.58 -150.17 ;...
+59.25 -150.92 ;...
+59.17 -151.92 ;...
+59.5 -151.42 ;...
+59.67 -151.92 ;...
+60.17 -151.42 ;...
+60.75 -151.33 ;...
+60.92 -150.42 ;...
+60.92 -150.42 ;...
+61.0027 -150.296 ;...
+61.0852 -150.171 ;...
+61.1677 -150.046 ;...
+61.25 -149.92 ;...
+61.2504 -150.107 ;...
+61.2505 -150.295 ;...
+61.2504 -150.483 ;...
+61.25 -150.67 ;...
+60.92 -151.67 ;...
+60.58 -152.25 ;...
+60.08 -152.5 ;...
+59.67 -153.08 ;...
+59.42 -153.83 ;...
+59.08 -154.08 ;...
+58.92 -153.33 ;...
+58.5 -153.83 ;...
+58.17 -154.33 ;...
+57.83 -155.25 ;...
+57.42 -156.17 ;...
+57 -156.67 ;...
+56.67 -157.67 ;...
+56.42 -158.42 ;...
+56 -158.75 ;...
+55.83 -159.58 ;...
+55.58 -160.5 ;...
+55.42 -161.42 ;...
+55.17 -162 ;...
+55 -162.83 ;...
+54.67 -163.67 ;...
+54.33 -164.75 ;...
+54.58 -164.92 ;...
+55 -163.92 ;...
+55.25 -162.92 ;...
+55.67 -162.33 ;...
+55.92 -161.42 ;...
+56 -160.5 ;...
+56 -160.5 ;...
+56.5 -159.92 ;...
+56.83 -159 ;...
+57.25 -158.33 ;...
+57.58 -157.67 ;...
+58.17 -157.5 ;...
+58.75 -157.42 ;...
+58.67 -158 ;...
+58.75 -158.67 ;...
+58.42 -158.92 ;...
+58.92 -159.58 ;...
+58.92 -161 ;...
+58.75 -161.58 ;...
+59.17 -161.92 ;...
+59.5 -161.67 ;...
+60.08 -162.17 ;...
+59.67 -163 ;...
+59.67 -163.83 ;...
+60.08 -164.33 ;...
+60.58 -164.92 ;...
+61.08 -165.25 ;...
+61.5 -166 ;...
+62.17 -165.5 ;...
+62.67 -164.83 ;...
+63 -164.67 ;...
+63.25 -163.92 ;...
+63 -163 ;...
+63.42 -162.25 ;...
+63.5 -161 ;...
+64 -160.75 ;...
+64.42 -161.25 ;...
+64.42 -161.25 ;...
+64.83 -161 ;...
+64.67 -161.92 ;...
+64.42 -162.67 ;...
+64.58 -163.67 ;...
+64.5 -164.92 ;...
+64.67 -166.33 ;...
+65 -166.67 ;...
+65.33 -166.33 ;...
+65.42 -167.25 ;...
+65.4602 -167.394 ;...
+65.5003 -167.539 ;...
+65.5402 -167.684 ;...
+65.58 -167.83 ;...
+65.6856 -167.583 ;...
+65.7908 -167.334 ;...
+65.8956 -167.083 ;...
+66 -166.83 ;...
+66.25 -165.75 ;...
+66.5 -164.67 ;...
+66.5 -163.83 ;...
+66.08 -163.75 ;...
+66.08 -162.17 ;...
+66.42 -161.75 ;...
+66.83 -162.5 ;...
+67.17 -163.5 ;...
+67.67 -164 ;...
+67.98 -165.25 ;...
+68.33 -166.17 ;...
+68.83 -166 ;...
+68.92 -164.83 ;...
+69.08 -163.5 ;...
+69.45 -163 ;...
+69.92 -162.67 ;...
+70.33 -161.58 ;...
+70.42 -160.42 ;...
+70.83 -159 ;...
+70.83 -159 ;...
+70.87 -157.58 ;...
+71.33 -156.33 ;...
+71.13 -154.83 ;...
+70.87 -154.33 ;...
+70.87 -152.5 ;...
+70.48 -151.5 ;...
+70.48 -150.33 ;...
+70.47 -149 ;...
+70.22 -147.5 ;...
+70.22 -146.17 ;...
+70 -145.08 ;...
+70.08 -144 ;...
+70.08 -142.83 ;...
+69.83 -141.83 ;...
+69.62 -141 ;...
+69.58 -139.5 ;...
+69.32 -138.33 ;...
+69 -137.33 ;...
+68.75 -136 ;...
+69.17 -135.92 ;...
+69.5 -135.17 ;...
+69.58 -134.17 ;...
+69.38 -133.33 ;...
+69.67 -132.58 ;...
+69.98 -131.17 ;...
+70.25 -129.83 ;...
+69.83 -129.42 ;...
+70 -128.5 ;...
+70.58 -128 ;...
+70.08 -126.67 ;...
+70.08 -126.67 ;...
+69.58 -126.17 ;...
+69.42 -125.33 ;...
+70 -124.42 ;...
+69.42 -124.33 ;...
+69.37 -123.5 ;...
+69.83 -123 ;...
+69.83 -121.33 ;...
+69.47 -120.25 ;...
+69.22 -118.83 ;...
+69 -117.5 ;...
+68.97 -116 ;...
+68.72 -114.67 ;...
+68.3 -114.08 ;...
+68.17 -115.08 ;...
+67.83 -115.33 ;...
+67.83 -114.17 ;...
+67.67 -113 ;...
+67.83 -111.58 ;...
+68 -110.25 ;...
+67.75 -109.25 ;...
+67.58 -108.17 ;...
+68 -107.83 ;...
+68.25 -109 ;...
+68.58 -108.25 ;...
+68.75 -106.92 ;...
+68.9 -105.67 ;...
+68.35 -104.58 ;...
+68.03 -103 ;...
+67.75 -101.75 ;...
+67.58 -100 ;...
+67.58 -100 ;...
+67.67 -98.75 ;...
+68.25 -98.67 ;...
+68.58 -98 ;...
+69 -99.58 ;...
+69.33 -98.58 ;...
+69.83 -97.92 ;...
+69.33 -96.42 ;...
+68.75 -95.58 ;...
+68.38 -96.92 ;...
+67.82 -96.83 ;...
+67.5 -95.83 ;...
+67.98 -95.5 ;...
+68 -95.17 ;...
+68.5 -94.25 ;...
+69 -94.67 ;...
+69.42 -94 ;...
+69.83 -95.67 ;...
+70.42 -96.75 ;...
+70.97 -96.25 ;...
+71.5 -95.83 ;...
+71.75 -95 ;...
+72.25 -95 ;...
+72.67 -95 ;...
+73.17 -95.25 ;...
+73.58 -95.58 ;...
+74 -95.17 ;...
+74.17 -93.17 ;...
+74.02 -91.17 ;...
+73.83 -90.17 ;...
+73.58 -90.67 ;...
+73.58 -90.67 ;...
+73.17 -91.33 ;...
+72.75 -91.92 ;...
+72.75 -93.08 ;...
+72.58 -94 ;...
+72.25 -93.5 ;...
+71.92 -94.17 ;...
+71.42 -93.5 ;...
+71 -92.67 ;...
+70.67 -91.92 ;...
+70.17 -91.33 ;...
+69.75 -92.42 ;...
+69.5 -91.33 ;...
+69.08 -90.67 ;...
+68.58 -89.92 ;...
+69.25 -88.92 ;...
+68.75 -88 ;...
+68.33 -88 ;...
+67.83 -88.17 ;...
+67.25 -87.25 ;...
+67.42 -86.5 ;...
+67.83 -86.5 ;...
+68.17 -85.42 ;...
+68.75 -85.42 ;...
+69.33 -85.5 ;...
+69.83 -85.5 ;...
+69.83 -84.25 ;...
+69.67 -82.92 ;...
+69.33 -82.75 ;...
+69.2 -81.33 ;...
+68.58 -81.33 ;...
+68.58 -81.33 ;...
+68.33 -82.42 ;...
+67.83 -82.42 ;...
+67.58 -81.17 ;...
+67 -81.42 ;...
+66.58 -82.67 ;...
+66.17 -83.75 ;...
+66.33 -85.08 ;...
+66.58 -85.5 ;...
+66.5 -86.75 ;...
+66.17 -86 ;...
+65.5 -87 ;...
+65 -87 ;...
+64.5 -87.75 ;...
+64 -88.5 ;...
+64 -90 ;...
+63.42 -90.67 ;...
+62.92 -90.58 ;...
+62.75 -91.58 ;...
+62.58 -92.5 ;...
+62.08 -92.5 ;...
+61.92 -93.17 ;...
+61.5 -93.67 ;...
+61 -94.17 ;...
+60.58 -94.42 ;...
+60.17 -94.67 ;...
+59.58 -94.75 ;...
+59.08 -94.75 ;...
+58.67 -94.42 ;...
+58.75 -93.75 ;...
+58.67 -93.08 ;...
+58.67 -93.08 ;...
+58.08 -92.67 ;...
+57.67 -92.67 ;...
+57.25 -92.33 ;...
+56.92 -92.33 ;...
+57.08 -91.75 ;...
+57.25 -91 ;...
+56.92 -90 ;...
+56.75 -89 ;...
+56.42 -88.08 ;...
+56.3153 -87.9338 ;...
+56.2103 -87.7884 ;...
+56.1053 -87.6438 ;...
+56 -87.5 ;...
+55.9578 -87.3319 ;...
+55.9155 -87.1643 ;...
+55.8728 -86.9969 ;...
+55.83 -86.83 ;...
+55.67 -86 ;...
+55.25 -85 ;...
+55.25 -84 ;...
+55.25 -83 ;...
+55 -82.17 ;...
+54.25 -82.42 ;...
+53.83 -82.17 ;...
+53.42 -82.17 ;...
+52.92 -82.33 ;...
+52.5 -81.67 ;...
+52.08 -81.25 ;...
+51.75 -80.75 ;...
+51.33 -80.42 ;...
+51 -79.75 ;...
+51.33 -79.83 ;...
+51.67 -79 ;...
+52.17 -78.42 ;...
+52.67 -78.83 ;...
+53.17 -78.83 ;...
+53.17 -78.83 ;...
+53.75 -78.92 ;...
+54.17 -79.17 ;...
+54.58 -79.5 ;...
+54.83 -78.75 ;...
+55.17 -78 ;...
+55.58 -77.25 ;...
+56 -76.75 ;...
+56.67 -76.75 ;...
+57.25 -77 ;...
+57.67 -77.17 ;...
+58.25 -77.5 ;...
+58.42 -78.08 ;...
+58.75 -78.67 ;...
+59.08 -78.58 ;...
+59.17 -77.92 ;...
+59.58 -77.42 ;...
+60 -77.42 ;...
+60.5 -77.58 ;...
+60.75 -78.08 ;...
+61.17 -77.75 ;...
+61.2725 -77.6881 ;...
+61.3751 -77.6258 ;...
+61.4775 -77.5631 ;...
+61.58 -77.5 ;...
+61.6427 -77.6441 ;...
+61.7053 -77.7888 ;...
+61.7677 -77.9341 ;...
+61.83 -78.08 ;...
+62.25 -78.08 ;...
+62.5 -77.42 ;...
+62.42 -76.42 ;...
+62.25 -75.5 ;...
+62.25 -74.83 ;...
+62.42 -73.67 ;...
+62.17 -72.92 ;...
+61.83 -72.25 ;...
+61.83 -72.25 ;...
+61.58 -71.5 ;...
+61.25 -71.75 ;...
+61 -71 ;...
+61 -70.25 ;...
+60.92 -69.5 ;...
+60.5 -69.67 ;...
+60.08 -69.42 ;...
+59.58 -69.5 ;...
+59.25 -69.5 ;...
+58.83 -69 ;...
+58.75 -68.42 ;...
+58.17 -67.75 ;...
+58.42 -66.75 ;...
+58.75 -66.42 ;...
+59 -65.67 ;...
+59.42 -65.25 ;...
+59.75 -65.58 ;...
+60 -65.17 ;...
+60.33 -64.67 ;...
+60 -64.33 ;...
+59.5 -63.83 ;...
+59 -63.25 ;...
+58.5 -62.83 ;...
+58 -62.33 ;...
+57.67 -61.83 ;...
+57.33 -61.83 ;...
+57.08 -61.33 ;...
+56.75 -61.75 ;...
+56.25 -61.75 ;...
+55.92 -61.33 ;...
+55.92 -61.33 ;...
+55.67 -60.5 ;...
+55.25 -60.33 ;...
+55.17 -59.25 ;...
+54.83 -58.92 ;...
+54.75 -58 ;...
+54.6877 -57.8744 ;...
+54.6253 -57.7492 ;...
+54.5627 -57.6244 ;...
+54.5 -57.5 ;...
+54.418 -57.7088 ;...
+54.3357 -57.9167 ;...
+54.253 -58.1237 ;...
+54.17 -58.33 ;...
+54.1483 -58.0796 ;...
+54.126 -57.8295 ;...
+54.1033 -57.5796 ;...
+54.08 -57.33 ;...
+53.67 -57.25 ;...
+53.67 -56.5 ;...
+53.17 -55.83 ;...
+52.75 -56 ;...
+52.08 -55.67 ;...
+51.83 -56.17 ;...
+51.5 -56.83 ;...
+51.42 -57.83 ;...
+51.17 -58.67 ;...
+50.83 -59.08 ;...
+50.5 -59.5 ;...
+50.17 -60 ;...
+50.17 -60.92 ;...
+50.08 -61.67 ;...
+50.25 -62.17 ;...
+50.25 -62.83 ;...
+50.25 -63.42 ;...
+50.33 -64 ;...
+50.25 -64.75 ;...
+50.25 -65.42 ;...
+50.25 -66 ;...
+50.08 -66.75 ;...
+50.08 -66.75 ;...
+49.75 -67.17 ;...
+49.33 -67.33 ;...
+49.33 -68 ;...
+48.92 -68.67 ;...
+48.58 -69.25 ;...
+48.17 -69.67 ;...
+47.83 -70 ;...
+47.5 -70.42 ;...
+47.17 -70.75 ;...
+46.83 -71.17 ;...
+47 -70.5 ;...
+47.33 -70.08 ;...
+47.67 -69.67 ;...
+48.08 -69.25 ;...
+48.42 -68.67 ;...
+48.67 -68 ;...
+48.83 -67.42 ;...
+49.08 -66.75 ;...
+49.17 -66 ;...
+49.25 -65.25 ;...
+49.08 -64.58 ;...
+48.83 -64.25 ;...
+48.5 -64.25 ;...
+48.25 -64.83 ;...
+48.08 -65.33 ;...
+48.08 -66.33 ;...
+47.58 -65.67 ;...
+47.75 -64.83 ;...
+47.33 -65 ;...
+46.75 -64.92 ;...
+46.75 -64.92 ;...
+46.25 -64.58 ;...
+46.08 -64 ;...
+45.75 -63.25 ;...
+45.67 -62.58 ;...
+45.83 -62 ;...
+45.75 -61.5 ;...
+46.17 -61.5 ;...
+46.58 -61 ;...
+47 -60.67 ;...
+46.83 -60.42 ;...
+46.33 -60.5 ;...
+46.2479 -60.3317 ;...
+46.1655 -60.164 ;...
+46.0829 -59.9967 ;...
+46 -59.83 ;...
+45.9178 -59.9782 ;...
+45.8354 -60.1259 ;...
+45.7528 -60.2732 ;...
+45.67 -60.42 ;...
+45.5 -61 ;...
+45.17 -61.17 ;...
+45 -62 ;...
+44.75 -63 ;...
+44.5 -63.67 ;...
+44.5 -64.17 ;...
+44.17 -64.5 ;...
+43.83 -65 ;...
+43.5 -65.5 ;...
+43.75 -66 ;...
+44.17 -66.17 ;...
+44.58 -65.92 ;...
+44.92 -65.33 ;...
+45.08 -64.92 ;...
+45.42 -64.25 ;...
+45.33 -64.92 ;...
+45.67 -64.83 ;...
+45.67 -64.83 ;...
+45.33 -65.58 ;...
+45.17 -66.25 ;...
+45.17 -67 ;...
+44.83 -67 ;...
+44.58 -67.5 ;...
+44.42 -68 ;...
+44.25 -68.58 ;...
+44.42 -68.83 ;...
+44 -69.17 ;...
+43.83 -69.75 ;...
+43.67 -70.25 ;...
+43.5 -70.25 ;...
+43.17 -70.58 ;...
+42.83 -70.83 ;...
+42.58 -70.75 ;...
+42.33 -71 ;...
+42.17 -70.67 ;...
+41.75 -70.5 ;...
+41.79 -70 ;...
+42.02 -70.04 ;...
+41.67 -69.93 ;...
+41.5 -70.67 ;...
+41.67 -70.67 ;...
+41.5 -71.08 ;...
+41.75 -71.33 ;...
+41.42 -71.5 ;...
+41.25 -72 ;...
+41.25 -72.5 ;...
+41.25 -73 ;...
+41.08 -73.5 ;...
+41.08 -73.5 ;...
+40.83 -74 ;...
+40.5 -74.33 ;...
+40.33 -74 ;...
+39.83 -74.17 ;...
+39.33 -74.5 ;...
+39 -75 ;...
+39.33 -75.25 ;...
+39.5 -75.58 ;...
+39 -75.42 ;...
+38.8551 -75.3144 ;...
+38.7102 -75.2091 ;...
+38.5651 -75.1044 ;...
+38.42 -75 ;...
+38.3575 -75.0627 ;...
+38.2951 -75.1252 ;...
+38.2325 -75.1877 ;...
+38.17 -75.25 ;...
+37.75 -75.58 ;...
+37.25 -75.75 ;...
+37.17 -76 ;...
+37.67 -75.83 ;...
+38 -75.67 ;...
+38 -76 ;...
+38.25 -75.92 ;...
+38.42 -76.33 ;...
+38.83 -76.17 ;...
+39.17 -76.17 ;...
+39.5 -76 ;...
+39.33 -76.42 ;...
+39 -76.58 ;...
+38.5 -76.5 ;...
+38.17 -76.42 ;...
+38.25 -76.92 ;...
+37.92 -76.33 ;...
+37.5 -76.42 ;...
+37.17 -76.42 ;...
+37.17 -76.42 ;...
+36.83 -76 ;...
+36.5 -75.83 ;...
+36.11 -75.66 ;...
+35.85 -75.53 ;...
+36.21 -75.91 ;...
+36.17 -76 ;...
+36.08 -76.33 ;...
+35.92 -76.75 ;...
+35.92 -76.25 ;...
+35.92 -75.83 ;...
+35.58 -75.83 ;...
+35.33 -76 ;...
+35.33 -76.33 ;...
+35 -76.83 ;...
+35 -76.33 ;...
+34.75 -76.58 ;...
+34.67 -77.17 ;...
+34.33 -77.58 ;...
+34 -78 ;...
+33.92 -78.42 ;...
+33.67 -78.92 ;...
+33.33 -79.17 ;...
+33 -79.5 ;...
+32.67 -80 ;...
+32.42 -80.5 ;...
+32 -81 ;...
+31.5 -81.25 ;...
+31 -81.5 ;...
+30.5 -81.42 ;...
+30 -81.33 ;...
+30 -81.33 ;...
+29.5 -81.08 ;...
+29 -80.92 ;...
+28.5 -80.58 ;...
+28.17 -80.58 ;...
+27.58 -80.33 ;...
+27 -80.08 ;...
+26.42 -80.08 ;...
+25.83 -80.17 ;...
+25.33 -80.42 ;...
+25.17 -80.67 ;...
+25.08 -81.08 ;...
+25.5 -81.25 ;...
+25.83 -81.67 ;...
+26.33 -81.92 ;...
+26.75 -82.17 ;...
+27.08 -82.5 ;...
+27.165 -82.52 ;...
+27.25 -82.5399 ;...
+27.335 -82.56 ;...
+27.42 -82.58 ;...
+27.545 -82.5401 ;...
+27.67 -82.5002 ;...
+27.795 -82.4601 ;...
+27.92 -82.42 ;...
+27.8976 -82.5026 ;...
+27.8751 -82.5851 ;...
+27.8526 -82.6676 ;...
+27.83 -82.75 ;...
+28.17 -82.75 ;...
+28.58 -82.67 ;...
+29.08 -82.83 ;...
+29.17 -83.08 ;...
+29.75 -83.5 ;...
+30.08 -83.92 ;...
+30 -84.42 ;...
+29.75 -84.83 ;...
+29.67 -85.33 ;...
+30.08 -85.5 ;...
+30.25 -86 ;...
+30.25 -86 ;...
+30.5 -86.33 ;...
+30.42 -86.83 ;...
+30.33 -87.33 ;...
+30.33 -87.83 ;...
+30.67 -88 ;...
+30.42 -88.25 ;...
+30.5 -88.75 ;...
+30.5 -89.17 ;...
+30.25 -89.67 ;...
+30.5 -90.17 ;...
+30.08 -90.42 ;...
+30 -90.08 ;...
+30.08 -89.67 ;...
+30.08 -89.25 ;...
+29.83 -89.33 ;...
+29.67 -89.58 ;...
+29.5 -89.58 ;...
+29.17 -89.08 ;...
+29 -89.25 ;...
+29.25 -89.5 ;...
+29.33 -89.83 ;...
+29.17 -90.17 ;...
+29.08 -90.75 ;...
+29.17 -91.25 ;...
+29.5 -91.33 ;...
+29.83 -91.83 ;...
+29.58 -92.25 ;...
+29.67 -92.75 ;...
+29.83 -93.25 ;...
+29.75 -93.83 ;...
+29.75 -93.83 ;...
+29.67 -94.25 ;...
+29.42 -94.75 ;...
+29.83 -94.75 ;...
+29.7901 -94.8326 ;...
+29.7501 -94.9151 ;...
+29.7101 -94.9976 ;...
+29.67 -95.08 ;...
+29.565 -95.0173 ;...
+29.4601 -94.9547 ;...
+29.355 -94.8923 ;...
+29.25 -94.83 ;...
+29.1876 -94.9152 ;...
+29.1251 -95.0002 ;...
+29.0626 -95.0852 ;...
+29 -95.17 ;...
+28.75 -95.5 ;...
+28.67 -96 ;...
+28.33 -96.5 ;...
+28 -97 ;...
+27.67 -97.33 ;...
+27.17 -97.42 ;...
+26.67 -97.25 ;...
+26.17 -97.25 ;...
+25.75 -97.17 ;...
+25.33 -97.42 ;...
+24.83 -97.58 ;...
+24.33 -97.75 ;...
+23.83 -97.75 ;...
+23.33 -97.75 ;...
+22.83 -97.75 ;...
+22.42 -97.92 ;...
+21.92 -97.75 ;...
+21.58 -97.25 ;...
+21.25 -97.42 ;...
+20.83 -97.25 ;...
+20.5 -97 ;...
+20.17 -96.67 ;...
+19.67 -96.42 ;...
+19.25 -96.17 ;...
+19.25 -96.17 ;...
+18.83 -95.83 ;...
+18.67 -95.33 ;...
+18.5 -94.83 ;...
+18.17 -94.5 ;...
+18.17 -94.17 ;...
+18.33 -93.58 ;...
+18.42 -93 ;...
+18.67 -92.42 ;...
+18.58 -91.83 ;...
+18.42 -91.83 ;...
+18.58 -91.25 ;...
+18.83 -91.33 ;...
+19.08 -91 ;...
+19.25 -90.67 ;...
+19.67 -90.67 ;...
+20 -90.5 ;...
+20.42 -90.42 ;...
+21 -90.33 ;...
+21.17 -90 ;...
+21.25 -89.5 ;...
+21.33 -89 ;...
+21.5 -88.5 ;...
+21.58 -88 ;...
+21.5 -87.58 ;...
+21.5 -87.08 ;...
+21.33 -86.83 ;...
+20.92 -86.83 ;...
+20.5 -87.25 ;...
+20 -87.42 ;...
+19.875 -87.44 ;...
+19.75 -87.4601 ;...
+19.625 -87.48 ;...
+19.5 -87.5 ;...
+19.5 -87.5 ;...
+19.5 -87.5 ;...
+19.5 -87.5 ;...
+19.5 -87.5 ;...
+19.375 -87.52 ;...
+19.25 -87.5401 ;...
+19.125 -87.56 ;...
+19 -87.58 ;...
+18.42 -87.67 ;...
+17.92 -87.92 ;...
+18.33 -88.17 ;...
+17.5 -88.25 ;...
+17 -88.25 ;...
+16.33 -88.5 ;...
+16 -89 ;...
+15.83 -88.58 ;...
+15.58 -88.17 ;...
+15.83 -87.58 ;...
+15.67 -87 ;...
+15.75 -86.5 ;...
+15.92 -86 ;...
+15.83 -85.5 ;...
+15.92 -85 ;...
+15.83 -84.67 ;...
+15.75 -84.25 ;...
+15.33 -83.83 ;...
+15.17 -83.42 ;...
+15 -83.25 ;...
+14.25 -83.25 ;...
+13.83 -83.5 ;...
+13.25 -83.5 ;...
+12.75 -83.5 ;...
+12.17 -83.67 ;...
+11.67 -83.67 ;...
+11.25 -83.83 ;...
+10.83 -83.67 ;...
+10.42 -83.42 ;...
+10.42 -83.42 ;...
+10 -83.17 ;...
+9.67 -82.75 ;...
+9.42 -82.33 ;...
+9 -82.17 ;...
+9 -81.75 ;...
+8.75 -81.42 ;...
+8.92 -80.83 ;...
+9.08 -80.33 ;...
+9.25 -79.83 ;...
+9.58 -79.58 ;...
+9.58 -79.08 ;...
+9.42 -78.67 ;...
+9.33 -78.25 ;...
+9 -77.83 ;...
+8.67 -77.5 ;...
+8.33 -77.17 ;...
+7.83 -76.75 ;...
+8.58 -76.75 ;...
+9 -76.25 ;...
+9.42 -76 ;...
+9.5 -75.58 ;...
+10 -75.58 ;...
+10.5 -75.5 ;...
+10.83 -75.25 ;...
+11 -74.83 ;...
+10.92 -74.42 ;...
+11.25 -74.17 ;...
+11.17 -73.5 ;...
+11.42 -73 ;...
+11.67 -72.67 ;...
+11.67 -72.67 ;...
+11.83 -72.25 ;...
+12.17 -72 ;...
+12.42 -71.67 ;...
+12.33 -71.33 ;...
+12.08 -71.17 ;...
+11.67 -71.42 ;...
+11.5 -71.92 ;...
+11 -71.67 ;...
+10.58 -71.5 ;...
+10.25 -71.75 ;...
+9.75 -72 ;...
+9.42 -71.75 ;...
+9 -71.67 ;...
+9.17 -71.08 ;...
+9.75 -71.08 ;...
+10.33 -71.42 ;...
+10.92 -71.42 ;...
+11.08 -71 ;...
+11.25 -70.58 ;...
+11.5 -70.17 ;...
+11.92 -70.33 ;...
+12.17 -69.92 ;...
+11.5 -69.75 ;...
+11.5 -69.33 ;...
+11.42 -68.83 ;...
+11.17 -68.42 ;...
+10.75 -68.33 ;...
+10.42 -68.17 ;...
+10.5 -67.58 ;...
+10.5 -67 ;...
+10.5 -67 ;...
+10.58 -66.58 ;...
+10.58 -66.17 ;...
+10.33 -66 ;...
+10.17 -65.58 ;...
+10.08 -65 ;...
+10.25 -64.5 ;...
+10.67 -64 ;...
+10.67 -63.42 ;...
+10.67 -62.75 ;...
+10.6701 -62.605 ;...
+10.6701 -62.46 ;...
+10.6701 -62.315 ;...
+10.67 -62.17 ;...
+10.6277 -62.3776 ;...
+10.5853 -62.5851 ;...
+10.5427 -62.7926 ;...
+10.5 -63 ;...
+10.17 -62.67 ;...
+9.75 -62.33 ;...
+9.83 -62 ;...
+9.67 -61.58 ;...
+9.5 -61 ;...
+9 -60.83 ;...
+8.42 -61 ;...
+8.58 -60.25 ;...
+8.25 -59.67 ;...
+7.92 -59.17 ;...
+7.58 -58.67 ;...
+7.17 -58.5 ;...
+6.75 -58.58 ;...
+6.83 -58.17 ;...
+6.5 -57.67 ;...
+6.25 -57.33 ;...
+5.92 -57.08 ;...
+5.83 -56.5 ;...
+5.83 -56 ;...
+5.83 -56 ;...
+5.92 -55.5 ;...
+5.92 -55 ;...
+5.83 -54.5 ;...
+5.67 -54 ;...
+5.58 -53.67 ;...
+5.42 -53.25 ;...
+5.17 -52.75 ;...
+4.83 -52.33 ;...
+4.5 -51.83 ;...
+4.25 -51.5 ;...
+3.75 -51 ;...
+3.17 -51 ;...
+2.5 -50.67 ;...
+1.83 -50.5 ;...
+1.75 -49.92 ;...
+1 -50 ;...
+0.5 -50.42 ;...
+0.17 -50.75 ;...
+-0.17 -51.17 ;...
+-0.67 -51.33 ;...
+-1.25 -51.42 ;...
+-1 -50.83 ;...
+-1.67 -50.58 ;...
+-1 -50.42 ;...
+-0.42 -50.42 ;...
+-0.17 -50 ;...
+-0.25 -49.5 ;...
+-0.17 -49 ;...
+-0.33 -48.42 ;...
+-1 -48.58 ;...
+-1 -48.58 ;...
+-1.42 -48.75 ;...
+-1.58 -48.5 ;...
+-0.83 -48.17 ;...
+-0.67 -47.83 ;...
+-0.67 -47.25 ;...
+-1 -46.67 ;...
+-1.17 -46.17 ;...
+-1.33 -45.5 ;...
+-1.67 -45.33 ;...
+-1.5 -44.83 ;...
+-1.92 -44.67 ;...
+-2.33 -44.42 ;...
+-2.83 -44.58 ;...
+-2.67 -44 ;...
+-2.42 -43.5 ;...
+-2.5 -43 ;...
+-2.75 -42.5 ;...
+-2.75 -42 ;...
+-3 -41.5 ;...
+-3 -41 ;...
+-2.83 -40.5 ;...
+-2.83 -40 ;...
+-3.17 -39.5 ;...
+-3.5 -39 ;...
+-3.83 -38.5 ;...
+-4.17 -38.17 ;...
+-4.5 -37.83 ;...
+-4.75 -37.33 ;...
+-5.08 -37 ;...
+-5.17 -36.5 ;...
+-5.17 -36.5 ;...
+-5.08 -36 ;...
+-5.25 -35.42 ;...
+-5.83 -35.17 ;...
+-6.42 -35 ;...
+-7 -34.83 ;...
+-7.5 -34.75 ;...
+-8.17 -34.83 ;...
+-8.67 -35.08 ;...
+-9.17 -35.33 ;...
+-9.58 -35.58 ;...
+-10 -36 ;...
+-10.42 -36.25 ;...
+-10.75 -36.75 ;...
+-11 -37 ;...
+-11.5 -37.33 ;...
+-12 -37.58 ;...
+-12.58 -38 ;...
+-13 -38.42 ;...
+-13.33 -38.92 ;...
+-14 -39 ;...
+-14.5 -39.08 ;...
+-15 -39 ;...
+-15.5 -39 ;...
+-16 -38.92 ;...
+-16.58 -39.08 ;...
+-17.17 -39.17 ;...
+-17.67 -39.17 ;...
+-18 -39.5 ;...
+-18.5 -39.58 ;...
+-19 -39.58 ;...
+-19 -39.58 ;...
+-19.5 -39.67 ;...
+-20 -40.17 ;...
+-20.5 -40.33 ;...
+-21 -40.83 ;...
+-21.5 -41 ;...
+-22 -41 ;...
+-22.25 -41.5 ;...
+-22.5 -42 ;...
+-22.92 -42 ;...
+-22.92 -42.5 ;...
+-23 -43 ;...
+-23 -43.5 ;...
+-22.92 -44 ;...
+-23 -44.67 ;...
+-23.33 -44.5 ;...
+-23.33 -45 ;...
+-23.83 -45.42 ;...
+-23.67 -45.75 ;...
+-24 -46.33 ;...
+-24.17 -46.83 ;...
+-24.58 -47.25 ;...
+-25 -47.75 ;...
+-25.42 -48.17 ;...
+-25.83 -48.58 ;...
+-26.33 -48.58 ;...
+-27 -48.58 ;...
+-27.5 -48.5 ;...
+-28 -48.58 ;...
+-28.5 -48.75 ;...
+-28.67 -49 ;...
+-28.67 -49 ;...
+-29 -49.42 ;...
+-29.5 -49.83 ;...
+-30 -50.17 ;...
+-30.5 -50.33 ;...
+-31 -50.67 ;...
+-31.5 -51 ;...
+-31.83 -51.5 ;...
+-32.17 -52 ;...
+-32.5 -52.42 ;...
+-33 -52.58 ;...
+-33.5 -53 ;...
+-33.83 -53.5 ;...
+-34.17 -53.58 ;...
+-34.5 -54 ;...
+-34.75 -54.5 ;...
+-34.83 -55 ;...
+-34.75 -55.67 ;...
+-34.83 -56.25 ;...
+-34.67 -56.83 ;...
+-34.42 -57.17 ;...
+-34.42 -57.83 ;...
+-34 -58.33 ;...
+-34.33 -58.5 ;...
+-34.67 -58.33 ;...
+-35 -57.5 ;...
+-35.33 -57.17 ;...
+-35.83 -57.42 ;...
+-36.25 -57.17 ;...
+-36.33 -56.75 ;...
+-36.83 -56.67 ;...
+-36.83 -56.67 ;...
+-37.33 -57 ;...
+-37.75 -57.42 ;...
+-38.17 -57.58 ;...
+-38.42 -58.17 ;...
+-38.58 -58.67 ;...
+-38.75 -59.33 ;...
+-38.83 -60 ;...
+-38.92 -60.5 ;...
+-39 -61.25 ;...
+-39 -62 ;...
+-38.75 -62.25 ;...
+-39.17 -62.33 ;...
+-39.5 -62 ;...
+-39.83 -62.17 ;...
+-40.33 -62.42 ;...
+-40.58 -62.25 ;...
+-40.92 -62.33 ;...
+-41.17 -63 ;...
+-41.17 -63.83 ;...
+-41 -64.33 ;...
+-40.75 -64.83 ;...
+-40.8126 -64.9148 ;...
+-40.8751 -64.9997 ;...
+-40.9376 -65.0848 ;...
+-41 -65.17 ;...
+-41.125 -65.1277 ;...
+-41.25 -65.0853 ;...
+-41.375 -65.0427 ;...
+-41.5 -65 ;...
+-42.08 -65 ;...
+-42.33 -64.5 ;...
+-42.08 -63.75 ;...
+-42.58 -63.58 ;...
+-42.83 -63.67 ;...
+-42.83 -64.17 ;...
+-42.5 -64.5 ;...
+-42.5 -64.5 ;...
+-42.83 -65 ;...
+-43 -64.33 ;...
+-43.33 -65 ;...
+-43.67 -65.25 ;...
+-44 -65.17 ;...
+-44.5 -65.17 ;...
+-44.67 -65.58 ;...
+-45 -65.58 ;...
+-45 -66.17 ;...
+-45.25 -66.83 ;...
+-45.5 -67.17 ;...
+-46 -67.5 ;...
+-46.5 -67.42 ;...
+-46.83 -67 ;...
+-47.17 -66.5 ;...
+-47.17 -65.75 ;...
+-47.5 -65.67 ;...
+-48 -65.83 ;...
+-48.33 -66.33 ;...
+-48.67 -67 ;...
+-49 -67.5 ;...
+-49.5 -67.67 ;...
+-50 -67.83 ;...
+-50.17 -68.5 ;...
+-50.5 -69 ;...
+-51 -69.17 ;...
+-51.42 -69 ;...
+-51.83 -68.83 ;...
+-52.33 -68.33 ;...
+-52.25 -69 ;...
+-52.25 -69 ;...
+-52.25 -69.5 ;...
+-52.5 -69.58 ;...
+-52.5 -70 ;...
+-52.75 -70.75 ;...
+-53.25 -70.92 ;...
+-53.75 -70.92 ;...
+-53.83 -71.25 ;...
+-53.67 -72 ;...
+-53.42 -72.25 ;...
+-53.17 -71.33 ;...
+-52.83 -71.08 ;...
+-52.75 -71.5 ;...
+-53.17 -72 ;...
+-53.42 -72.5 ;...
+-53.17 -73.17 ;...
+-52.92 -73.5 ;...
+-52.67 -73.5 ;...
+-52.58 -74 ;...
+-52.17 -74.17 ;...
+-52.42 -74.5 ;...
+-52.25 -75 ;...
+-51.75 -75 ;...
+-51.83 -74.33 ;...
+-51.5 -74.17 ;...
+-51 -74.33 ;...
+-51.33 -74.5 ;...
+-51.393 -74.7066 ;...
+-51.4557 -74.9139 ;...
+-51.5181 -75.1216 ;...
+-51.58 -75.33 ;...
+-51.4776 -75.2469 ;...
+-51.3751 -75.1643 ;...
+-51.2726 -75.0819 ;...
+-51.17 -75 ;...
+-50.67 -74.83 ;...
+-50.75 -74.5 ;...
+-50.75 -74.5 ;...
+-50.42 -74.67 ;...
+-50 -74.5 ;...
+-50.75 -75.33 ;...
+-50.17 -75.33 ;...
+-50.1076 -75.2472 ;...
+-50.0451 -75.1646 ;...
+-49.9826 -75.0822 ;...
+-49.92 -75 ;...
+-49.8977 -75.1252 ;...
+-49.8753 -75.2502 ;...
+-49.8527 -75.3752 ;...
+-49.83 -75.5 ;...
+-49.42 -75.42 ;...
+-49.17 -75.58 ;...
+-48.58 -75.42 ;...
+-48.17 -75.42 ;...
+-47.83 -75.33 ;...
+-47.75 -74.92 ;...
+-48.33 -74.5 ;...
+-47.83 -74.5 ;...
+-47.42 -74.42 ;...
+-46.83 -74 ;...
+-46.83 -74.5 ;...
+-46.67 -75 ;...
+-46.92 -75.5 ;...
+-46.58 -75.5 ;...
+-46.17 -74.75 ;...
+-45.92 -75 ;...
+-45.83 -74.5 ;...
+-45.58 -74.67 ;...
+-45.25 -74.5 ;...
+-45 -74.33 ;...
+-44.58 -74.5 ;...
+-44.17 -74.25 ;...
+-43.83 -73.83 ;...
+-44.17 -73.5 ;...
+-44.17 -73.5 ;...
+-44.42 -73.67 ;...
+-44.67 -73.58 ;...
+-45 -73.83 ;...
+-45.17 -73.67 ;...
+-45.5 -73.58 ;...
+-45 -73.33 ;...
+-44.67 -73.42 ;...
+-44.42 -73 ;...
+-44.17 -73.17 ;...
+-43.75 -72.83 ;...
+-43.42 -73 ;...
+-43 -72.75 ;...
+-42.33 -72.75 ;...
+-42.2901 -72.6874 ;...
+-42.2501 -72.6248 ;...
+-42.2101 -72.5624 ;...
+-42.17 -72.5 ;...
+-42.1276 -72.5626 ;...
+-42.0851 -72.6252 ;...
+-42.0426 -72.6876 ;...
+-42 -72.75 ;...
+-41.9176 -72.6873 ;...
+-41.8351 -72.6247 ;...
+-41.7526 -72.5623 ;...
+-41.67 -72.5 ;...
+-41.6277 -72.6252 ;...
+-41.5853 -72.7503 ;...
+-41.5427 -72.8752 ;...
+-41.5 -73 ;...
+-41.83 -73.17 ;...
+-41.75 -73.58 ;...
+-41.42 -73.75 ;...
+-41 -73.83 ;...
+-40.58 -73.67 ;...
+-40 -73.67 ;...
+-39.5 -73.25 ;...
+-39 -73.33 ;...
+-38.58 -73.42 ;...
+-38.17 -73.42 ;...
+-37.75 -73.67 ;...
+-37.17 -73.5 ;...
+-37.17 -73.08 ;...
+-37.17 -73.08 ;...
+-36.67 -73.08 ;...
+-36.5 -72.83 ;...
+-36 -72.67 ;...
+-35.5 -72.5 ;...
+-35 -72.17 ;...
+-34.5 -71.92 ;...
+-34 -71.83 ;...
+-33.67 -71.58 ;...
+-33.17 -71.67 ;...
+-32.5 -71.33 ;...
+-32.17 -71.5 ;...
+-31.83 -71.5 ;...
+-31.33 -71.58 ;...
+-30.75 -71.67 ;...
+-30.25 -71.58 ;...
+-30 -71.25 ;...
+-29.33 -71.33 ;...
+-29 -71.5 ;...
+-28.5 -71.25 ;...
+-28 -71.17 ;...
+-27.5 -70.92 ;...
+-27 -70.92 ;...
+-26.33 -70.58 ;...
+-25.75 -70.75 ;...
+-25.42 -70.5 ;...
+-25 -70.5 ;...
+-24.5 -70.58 ;...
+-24 -70.5 ;...
+-23.5 -70.5 ;...
+-23 -70.5 ;...
+-23 -70.5 ;...
+-22.5 -70.25 ;...
+-22 -70.17 ;...
+-21.5 -70.08 ;...
+-21 -70.17 ;...
+-20.5 -70.17 ;...
+-20 -70.08 ;...
+-19.33 -70.25 ;...
+-18.75 -70.33 ;...
+-18.25 -70.33 ;...
+-17.83 -70.83 ;...
+-17.67 -71.17 ;...
+-17.25 -71.5 ;...
+-17 -72 ;...
+-16.67 -72.5 ;...
+-16.58 -72.83 ;...
+-16.25 -73.5 ;...
+-15.83 -74 ;...
+-15.75 -74.5 ;...
+-15.33 -75.17 ;...
+-15 -75.5 ;...
+-14.67 -75.83 ;...
+-14.17 -76.25 ;...
+-13.67 -76.17 ;...
+-13 -76.5 ;...
+-12.5 -76.67 ;...
+-12.17 -77 ;...
+-11.83 -77.17 ;...
+-11.33 -77.5 ;...
+-10.83 -77.67 ;...
+-10.17 -78.17 ;...
+-10.17 -78.17 ;...
+-9.67 -78.33 ;...
+-9.17 -78.5 ;...
+-8.67 -78.67 ;...
+-8.17 -79 ;...
+-7.83 -79.5 ;...
+-7.25 -79.67 ;...
+-6.83 -80 ;...
+-6.5 -80.33 ;...
+-6.25 -80.83 ;...
+-6 -81.17 ;...
+-5.67 -80.83 ;...
+-5.17 -81.17 ;...
+-4.67 -81.33 ;...
+-4.25 -81.33 ;...
+-3.83 -80.83 ;...
+-3.5 -80.33 ;...
+-3.33 -80 ;...
+-2.83 -79.83 ;...
+-2.5 -79.67 ;...
+-2.5 -80.08 ;...
+-3 -80.17 ;...
+-2.5 -80.58 ;...
+-2.33 -80.92 ;...
+-2.17 -80.75 ;...
+-1.67 -80.75 ;...
+-1 -80.92 ;...
+-0.83 -80.5 ;...
+-0.33 -80.5 ;...
+0 -80.08 ;...
+NaN NaN ;...
+66.08 -125.08 ;...
+66.13 -123.58 ;...
+66.25 -122 ;...
+66 -121 ;...
+65.75 -122 ;...
+65.58 -122.75 ;...
+65 -123.25 ;...
+65 -121.75 ;...
+65.33 -121.58 ;...
+65.67 -120.67 ;...
+65.33 -120.5 ;...
+64.83 -121 ;...
+65.37 -119.33 ;...
+65.82 -120 ;...
+65.67 -118.42 ;...
+66.08 -118.08 ;...
+66.42 -117.67 ;...
+66.33 -119.08 ;...
+66.3537 -119.434 ;...
+66.3766 -119.789 ;...
+66.3987 -120.144 ;...
+66.42 -120.5 ;...
+66.4828 -120.314 ;...
+66.5454 -120.127 ;...
+66.6078 -119.939 ;...
+66.67 -119.75 ;...
+66.92 -118.92 ;...
+67.08 -120 ;...
+66.75 -121.33 ;...
+66.58 -122.67 ;...
+66.37 -123.83 ;...
+66.08 -125.08 ;...
+NaN NaN ;...
+61.17 -117 ;...
+60.83 -116 ;...
+60.83 -115.25 ;...
+61 -114.42 ;...
+60.92 -113.92 ;...
+61.25 -113.75 ;...
+61.42 -112.92 ;...
+61.58 -112.17 ;...
+62.08 -111.75 ;...
+62.33 -110.92 ;...
+62.3533 -110.648 ;...
+62.3761 -110.376 ;...
+62.3983 -110.103 ;...
+62.42 -109.83 ;...
+62.4826 -109.914 ;...
+62.5451 -109.999 ;...
+62.6076 -110.084 ;...
+62.67 -110.17 ;...
+62.6709 -109.878 ;...
+62.6712 -109.585 ;...
+62.6709 -109.292 ;...
+62.67 -109 ;...
+62.83 -110 ;...
+62.83 -110.75 ;...
+62.67 -111.58 ;...
+62.42 -111.92 ;...
+62.08 -112.42 ;...
+62 -113.08 ;...
+62.17 -113.83 ;...
+62.42 -114.17 ;...
+62.42 -115.33 ;...
+62.17 -115.08 ;...
+62.0852 -114.934 ;...
+62.0003 -114.788 ;...
+61.9152 -114.644 ;...
+61.83 -114.5 ;...
+61.8104 -114.688 ;...
+61.7905 -114.875 ;...
+61.7704 -115.063 ;...
+61.75 -115.25 ;...
+61.42 -115.33 ;...
+61.17 -115.83 ;...
+61.17 -117 ;...
+NaN NaN ;...
+58.67 -111.25 ;...
+58.58 -110.33 ;...
+58.92 -110 ;...
+59.08 -109.25 ;...
+59.08 -108.25 ;...
+59.123 -108.043 ;...
+59.1657 -107.836 ;...
+59.208 -107.628 ;...
+59.25 -107.42 ;...
+59.2927 -107.564 ;...
+59.3353 -107.709 ;...
+59.3777 -107.854 ;...
+59.42 -108 ;...
+59.42 -108.58 ;...
+59.67 -109.17 ;...
+59.67 -109.67 ;...
+59.25 -110.17 ;...
+59 -110.67 ;...
+58.67 -111.25 ;...
+NaN NaN ;...
+56.33 -103.08 ;...
+56.58 -102.17 ;...
+57 -102.17 ;...
+57.42 -102.17 ;...
+57.67 -101.58 ;...
+58.17 -102.08 ;...
+57.75 -102.5 ;...
+57.6676 -102.583 ;...
+57.5851 -102.666 ;...
+57.5026 -102.748 ;...
+57.42 -102.83 ;...
+57.3151 -102.747 ;...
+57.2101 -102.664 ;...
+57.1051 -102.582 ;...
+57 -102.5 ;...
+56.9175 -102.563 ;...
+56.8351 -102.626 ;...
+56.7525 -102.688 ;...
+56.67 -102.75 ;...
+56.33 -103.08 ;...
+NaN NaN ;...
+53.33 -99.25 ;...
+53 -98.92 ;...
+52.5 -98.67 ;...
+52.25 -98.17 ;...
+51.92 -98.17 ;...
+51.92 -97.42 ;...
+51.42 -97.33 ;...
+51.58 -96.83 ;...
+51.17 -96.92 ;...
+50.75 -97 ;...
+50.33 -97 ;...
+50.5 -96.42 ;...
+51.17 -96.17 ;...
+51.58 -96.58 ;...
+52.17 -97 ;...
+52.67 -97.25 ;...
+53.17 -97.58 ;...
+53.67 -97.83 ;...
+53.75 -98.42 ;...
+53.75 -99 ;...
+53.33 -99.25 ;...
+NaN NaN ;...
+46.67 -92.08 ;...
+46.67 -91.58 ;...
+46.92 -90.92 ;...
+46.5 -90.5 ;...
+46.75 -89.75 ;...
+47 -89 ;...
+47.42 -88.25 ;...
+47.42 -87.75 ;...
+47.08 -88.42 ;...
+46.75 -88.42 ;...
+46.83 -87.75 ;...
+46.42 -87.33 ;...
+46.42 -86.67 ;...
+46.67 -85.83 ;...
+46.75 -85 ;...
+46.5 -85 ;...
+46.5002 -84.875 ;...
+46.5003 -84.75 ;...
+46.5002 -84.625 ;...
+46.5 -84.5 ;...
+46.6051 -84.5621 ;...
+46.7101 -84.6245 ;...
+46.8151 -84.6871 ;...
+46.92 -84.75 ;...
+47.33 -84.67 ;...
+47.58 -85 ;...
+47.92 -85 ;...
+47.92 -85.75 ;...
+48.25 -86.17 ;...
+48.75 -86.5 ;...
+48.75 -87.33 ;...
+49 -88.08 ;...
+48.67 -88.42 ;...
+48.42 -89.08 ;...
+48.08 -89.42 ;...
+47.83 -89.83 ;...
+47.83 -89.83 ;...
+47.67 -90.5 ;...
+47.33 -91 ;...
+47 -91.58 ;...
+46.67 -92.08 ;...
+NaN NaN ;...
+44.58 -88 ;...
+44.6655 -87.8133 ;...
+44.7506 -87.6261 ;...
+44.8355 -87.4383 ;...
+44.92 -87.25 ;...
+44.8151 -87.3128 ;...
+44.7101 -87.3755 ;...
+44.6051 -87.4378 ;...
+44.5 -87.5 ;...
+44.4175 -87.5201 ;...
+44.335 -87.5401 ;...
+44.2525 -87.5601 ;...
+44.17 -87.58 ;...
+43.58 -87.75 ;...
+43.17 -87.92 ;...
+42.75 -87.83 ;...
+42.17 -87.83 ;...
+42.0451 -87.747 ;...
+41.9201 -87.6644 ;...
+41.7951 -87.582 ;...
+41.67 -87.5 ;...
+41.6701 -87.395 ;...
+41.6702 -87.29 ;...
+41.6701 -87.185 ;...
+41.67 -87.08 ;...
+41.83 -86.67 ;...
+42.17 -86.42 ;...
+42.75 -86.25 ;...
+43.25 -86.33 ;...
+43.67 -86.5 ;...
+44.08 -86.5 ;...
+44.5 -86.25 ;...
+44.92 -86.08 ;...
+45.17 -85.33 ;...
+45.67 -85 ;...
+45.6702 -84.875 ;...
+45.6703 -84.75 ;...
+45.6702 -84.625 ;...
+45.67 -84.5 ;...
+45.6276 -84.3948 ;...
+45.5852 -84.2897 ;...
+45.5426 -84.1848 ;...
+45.5 -84.08 ;...
+45.33 -83.58 ;...
+45 -83.33 ;...
+44.42 -83.33 ;...
+43.92 -83.83 ;...
+43.58 -83.83 ;...
+43.92 -83.42 ;...
+44.08 -82.92 ;...
+43.58 -82.67 ;...
+43.58 -82.67 ;...
+43.08 -82.42 ;...
+43.33 -81.75 ;...
+43.92 -81.75 ;...
+44.42 -81.5 ;...
+44.83 -81.33 ;...
+45.17 -81.5 ;...
+44.75 -80.92 ;...
+44.5 -80.08 ;...
+44.5826 -79.9979 ;...
+44.6651 -79.9155 ;...
+44.7476 -79.8329 ;...
+44.83 -79.75 ;...
+44.9777 -79.874 ;...
+45.1253 -79.9987 ;...
+45.2727 -80.124 ;...
+45.42 -80.25 ;...
+45.92 -80.75 ;...
+46.08 -81.67 ;...
+46.17 -82.33 ;...
+46.17 -83 ;...
+46.25 -83.67 ;...
+46.25 -84.17 ;...
+45.92 -84 ;...
+45.92 -84.83 ;...
+46 -85.25 ;...
+45.92 -85.75 ;...
+45.75 -86.75 ;...
+45.75 -87 ;...
+45.42 -87.33 ;...
+45 -87.67 ;...
+44.58 -88 ;...
+NaN NaN ;...
+45.83 -83.08 ;...
+45.83 -82.33 ;...
+45.92 -81.75 ;...
+45.5 -81.92 ;...
+45.67 -82.42 ;...
+45.83 -83.08 ;...
+NaN NaN ;...
+41.75 -83.5 ;...
+41.5 -82.83 ;...
+41.5 -82.25 ;...
+41.58 -81.67 ;...
+41.83 -81.08 ;...
+42 -80.5 ;...
+42.25 -79.83 ;...
+42.5 -79.25 ;...
+42.83 -78.83 ;...
+42.83 -79.5 ;...
+42.83 -80.17 ;...
+42.58 -80.5 ;...
+42.67 -80.92 ;...
+42.58 -81.5 ;...
+42.25 -81.83 ;...
+42 -82.5 ;...
+42 -83.08 ;...
+41.75 -83.5 ;...
+NaN NaN ;...
+43.25 -79.75 ;...
+43.17 -79.25 ;...
+43.33 -78.58 ;...
+43.33 -78 ;...
+43.25 -77.42 ;...
+43.33 -76.83 ;...
+43.5 -76.17 ;...
+44 -76.25 ;...
+44.33 -76.08 ;...
+44.17 -76.67 ;...
+43.92 -77 ;...
+44 -77.67 ;...
+43.92 -78.17 ;...
+43.83 -78.83 ;...
+43.67 -79.42 ;...
+43.25 -79.75 ;...
+NaN NaN ;...
+0.08 -91.42 ;...
+-0.33 -91.08 ;...
+-0.75 -90.83 ;...
+-1.08 -91.17 ;...
+-0.92 -91.5 ;...
+-0.67 -91.17 ;...
+-0.42 -91.5 ;...
+0.08 -91.42 ;...
+NaN NaN ;...
+-41.83 -74 ;...
+-42 -73.42 ;...
+-42.33 -73.33 ;...
+-42.67 -73.67 ;...
+-43 -73.5 ;...
+-43.42 -73.75 ;...
+-43.33 -74.33 ;...
+-42.67 -74.17 ;...
+-42.33 -74.17 ;...
+-41.83 -74 ;...
+NaN NaN ;...
+-52.58 -68.75 ;...
+-53 -68.25 ;...
+-53.33 -68.17 ;...
+-53.67 -68 ;...
+-54 -67.42 ;...
+-54.25 -66.75 ;...
+-54.5 -66.33 ;...
+-54.67 -65.83 ;...
+-54.67 -65.17 ;...
+-54.92 -65.25 ;...
+-55 -65.67 ;...
+-55 -66.5 ;...
+-55.25 -67.33 ;...
+-55.25 -68.17 ;...
+-55.67 -68 ;...
+-55.42 -68.67 ;...
+-55.5 -69.33 ;...
+-55.33 -69.83 ;...
+-55 -70 ;...
+-55.17 -70.5 ;...
+-55 -71.17 ;...
+-54.67 -72 ;...
+-54.67 -72 ;...
+-54.42 -72 ;...
+-54.42 -72.5 ;...
+-54.08 -72.67 ;...
+-54.08 -73.33 ;...
+-53.83 -73.33 ;...
+-53.58 -73.83 ;...
+-53.33 -73.5 ;...
+-53.25 -74 ;...
+-53 -74.5 ;...
+-52.75 -74.67 ;...
+-53 -74 ;...
+-53.25 -73.33 ;...
+-53.58 -72.5 ;...
+-53.83 -72.08 ;...
+-54 -71.33 ;...
+-54.17 -71 ;...
+-53.67 -70.5 ;...
+-54.17 -70.33 ;...
+-54.1901 -70.2476 ;...
+-54.2101 -70.1652 ;...
+-54.2301 -70.0826 ;...
+-54.25 -70 ;...
+-54.125 -70.0202 ;...
+-54 -70.0402 ;...
+-53.875 -70.0602 ;...
+-53.75 -70.08 ;...
+-53.6879 -69.8917 ;...
+-53.6256 -69.7039 ;...
+-53.5629 -69.5167 ;...
+-53.5 -69.33 ;...
+-53.33 -69.42 ;...
+-53.5 -70.17 ;...
+-53.33 -70.42 ;...
+-52.83 -70.25 ;...
+-52.83 -69.75 ;...
+-52.58 -69.5 ;...
+-52.67 -69.17 ;...
+-52.58 -68.75 ;...
+NaN NaN ;...
+-51.83 -61.17 ;...
+-52 -60.5 ;...
+-51.75 -60.17 ;...
+-51.42 -60.5 ;...
+-51.33 -59.67 ;...
+-51.5 -59 ;...
+-51.33 -58.33 ;...
+-51.5 -57.75 ;...
+-51.83 -57.75 ;...
+-52 -58.5 ;...
+-52.25 -58.83 ;...
+-52.33 -59.5 ;...
+-52.08 -59.67 ;...
+-52 -60 ;...
+-52.25 -60.5 ;...
+-51.83 -61.17 ;...
+NaN NaN ;...
+-54 -38 ;...
+-54.08 -37 ;...
+-54.33 -36.25 ;...
+-54.58 -35.83 ;...
+-54.92 -36 ;...
+-54.5 -36.58 ;...
+-54.25 -37.25 ;...
+-54 -38 ;...
+NaN NaN ;...
+10 -61.83 ;...
+10.17 -61.5 ;...
+10.58 -61.5 ;...
+10.67 -61.67 ;...
+10.75 -61 ;...
+10.25 -61 ;...
+10 -61.25 ;...
+10 -61.83 ;...
+NaN NaN ;...
+18.17 -78.33 ;...
+18.33 -78.25 ;...
+18.5 -77.83 ;...
+18.42 -77.33 ;...
+18.33 -76.83 ;...
+18.17 -76.42 ;...
+17.83 -76.25 ;...
+17.83 -76.58 ;...
+17.92 -76.92 ;...
+17.75 -77.25 ;...
+17.83 -77.67 ;...
+18.17 -78 ;...
+18.17 -78.33 ;...
+NaN NaN ;...
+21.77 -83.07 ;...
+21.98 -82.87 ;...
+21.82 -82.65 ;...
+21.7475 -82.6124 ;...
+21.675 -82.5749 ;...
+21.6025 -82.5374 ;...
+21.53 -82.5 ;...
+21.5051 -82.5825 ;...
+21.4801 -82.6651 ;...
+21.4551 -82.7475 ;...
+21.43 -82.83 ;...
+21.47 -83.05 ;...
+21.6 -82.95 ;...
+21.77 -83.07 ;...
+NaN NaN ;...
+21.83 -84.83 ;...
+22 -84.33 ;...
+22.33 -84.33 ;...
+22.58 -84 ;...
+22.83 -83.5 ;...
+22.92 -83 ;...
+23 -82.5 ;...
+23.17 -82 ;...
+23.17 -81.5 ;...
+23 -81 ;...
+23 -80.5 ;...
+22.83 -80.08 ;...
+22.67 -79.67 ;...
+22.33 -79.33 ;...
+22.33 -78.83 ;...
+22.17 -78.33 ;...
+21.92 -77.83 ;...
+21.67 -77.42 ;...
+21.5 -77 ;...
+21.17 -76.5 ;...
+21.08 -76 ;...
+21 -75.58 ;...
+20.58 -75.67 ;...
+20.67 -75.33 ;...
+20.58 -74.83 ;...
+20.25 -74.5 ;...
+20.25 -74.25 ;...
+20.08 -74.17 ;...
+20.08 -74.17 ;...
+20 -74.67 ;...
+19.83 -75 ;...
+19.83 -75.5 ;...
+19.92 -76 ;...
+19.92 -76.5 ;...
+19.83 -77 ;...
+19.8302 -77.1675 ;...
+19.8303 -77.335 ;...
+19.8302 -77.5025 ;...
+19.83 -77.67 ;...
+19.8925 -77.6076 ;...
+19.955 -77.5451 ;...
+20.0175 -77.4826 ;...
+20.08 -77.42 ;...
+20.33 -77.08 ;...
+20.67 -77.25 ;...
+20.67 -78 ;...
+21 -78.5 ;...
+21.42 -78.58 ;...
+21.58 -78.75 ;...
+21.5 -79.25 ;...
+21.67 -79.75 ;...
+21.75 -80.17 ;...
+22 -80.5 ;...
+22 -81.25 ;...
+22.08 -81.75 ;...
+22.33 -82.08 ;...
+22.5 -81.58 ;...
+22.58 -82.25 ;...
+22.67 -82.75 ;...
+22.42 -83.08 ;...
+22.17 -83.42 ;...
+22.17 -84 ;...
+22.085 -84.0426 ;...
+22 -84.0851 ;...
+21.915 -84.1276 ;...
+21.83 -84.17 ;...
+21.8302 -84.335 ;...
+21.8303 -84.5 ;...
+21.8302 -84.665 ;...
+21.83 -84.83 ;...
+NaN NaN ;...
+18.33 -74.5 ;...
+18.58 -74.17 ;...
+18.5 -73.67 ;...
+18.33 -73.25 ;...
+18.33 -72.75 ;...
+18.3926 -72.6451 ;...
+18.4551 -72.5402 ;...
+18.5176 -72.4351 ;...
+18.58 -72.33 ;...
+18.6851 -72.4348 ;...
+18.7901 -72.5397 ;...
+18.8951 -72.6448 ;...
+19 -72.75 ;...
+19.33 -72.75 ;...
+19.67 -73 ;...
+19.67 -73 ;...
+19.67 -73.5 ;...
+19.92 -73.17 ;...
+19.92 -72.58 ;...
+19.67 -72 ;...
+19.83 -71.67 ;...
+19.83 -71 ;...
+19.75 -70.5 ;...
+19.67 -69.92 ;...
+19.33 -69.75 ;...
+19.33 -69.17 ;...
+19.08 -69.58 ;...
+18.92 -68.75 ;...
+18.5 -68.33 ;...
+18.08 -68.67 ;...
+18.42 -69 ;...
+18.42 -69.5 ;...
+18.25 -70.08 ;...
+18.17 -70.58 ;...
+18.42 -70.58 ;...
+18.25 -71.08 ;...
+18 -71.08 ;...
+17.58 -71.42 ;...
+18.08 -71.83 ;...
+18.17 -72.33 ;...
+18.08 -72.75 ;...
+18.17 -73.17 ;...
+18.17 -73.67 ;...
+18 -73.83 ;...
+18.33 -74.5 ;...
+NaN NaN ;...
+18 -67.17 ;...
+18.5 -67.17 ;...
+18.5 -66.58 ;...
+18.5 -66 ;...
+18.25 -65.67 ;...
+18 -65.92 ;...
+18 -66.33 ;...
+17.92 -66.75 ;...
+18 -67.17 ;...
+NaN NaN ;...
+16.3689 -61.7519 ;...
+16.2811 -61.5521 ;...
+16.4582 -61.544 ;...
+16.503 -61.4947 ;...
+16.4717 -61.4276 ;...
+16.3672 -61.3918 ;...
+16.2654 -61.1898 ;...
+16.1775 -61.464 ;...
+16.2368 -61.5943 ;...
+16.0384 -61.5661 ;...
+15.9543 -61.6919 ;...
+16.0689 -61.7602 ;...
+16.3295 -61.7905 ;...
+16.3689 -61.7519 ;...
+NaN NaN ;...
+14.5484 -60.9511 ;...
+14.8553 -61.1413 ;...
+14.9656 -61.1213 ;...
+14.9616 -60.9787 ;...
+14.7803 -60.831 ;...
+14.5404 -60.7499 ;...
+14.5484 -60.9511 ;...
+NaN NaN ;...
+25.17 -78.25 ;...
+25.17 -78 ;...
+24.67 -77.75 ;...
+24.33 -77.75 ;...
+24.2475 -77.6874 ;...
+24.1651 -77.6248 ;...
+24.0825 -77.5624 ;...
+24 -77.5 ;...
+23.9175 -77.5426 ;...
+23.835 -77.5851 ;...
+23.7525 -77.6276 ;...
+23.67 -77.67 ;...
+24.17 -77.92 ;...
+24.58 -78.42 ;...
+24.83 -78.17 ;...
+25.17 -78.25 ;...
+NaN NaN ;...
+20.92 -73.67 ;...
+21.17 -73.5 ;...
+21.08 -73.17 ;...
+21.33 -73 ;...
+20.92 -73.17 ;...
+20.92 -73.67 ;...
+NaN NaN ;...
+26.5651 -78.788 ;...
+26.5915 -78.68 ;...
+26.8 -78.5758 ;...
+26.7468 -78.5222 ;...
+26.7475 -77.9 ;...
+26.6034 -77.8693 ;...
+26.6252 -78.2379 ;...
+26.4759 -78.7257 ;...
+26.5651 -78.788 ;...
+NaN NaN ;...
+26.8874 -77.8707 ;...
+26.9324 -77.7723 ;...
+26.9097 -77.5532 ;...
+26.5531 -77.0444 ;...
+26.2888 -76.9678 ;...
+26.261 -77.1279 ;...
+25.8781 -77.1838 ;...
+25.849 -77.2401 ;...
+25.9954 -77.3859 ;...
+26.119 -77.2254 ;...
+26.1834 -77.2626 ;...
+26.2706 -77.2209 ;...
+26.4374 -77.2372 ;...
+26.5537 -77.1453 ;...
+26.616 -77.3199 ;...
+26.8522 -77.5773 ;...
+26.8874 -77.8707 ;...
+NaN NaN ;...
+25.5642 -76.7079 ;...
+25.5637 -76.6476 ;...
+25.4894 -76.614 ;...
+25.2976 -76.2798 ;...
+25.1483 -76.1185 ;...
+25.0495 -76.0973 ;...
+24.752 -76.1645 ;...
+24.6404 -76.1317 ;...
+24.7462 -76.2098 ;...
+24.7971 -76.29 ;...
+24.8399 -76.1943 ;...
+24.9191 -76.2047 ;...
+25.0181 -76.1355 ;...
+25.1324 -76.1838 ;...
+25.2566 -76.2966 ;...
+25.4239 -76.7493 ;...
+25.5642 -76.7079 ;...
+NaN NaN ;...
+24.6848 -75.7234 ;...
+24.6314 -75.6203 ;...
+24.1452 -75.2979 ;...
+24.1705 -75.3665 ;...
+24.1268 -75.5053 ;...
+24.2187 -75.4386 ;...
+24.49 -75.5914 ;...
+24.5163 -75.688 ;...
+24.6848 -75.7234 ;...
+NaN NaN ;...
+23.6925 -75.2971 ;...
+23.3717 -75.0781 ;...
+23.1621 -75.0256 ;...
+23.1308 -74.9374 ;...
+22.9059 -74.8088 ;...
+22.857 -74.8472 ;...
+23.0658 -74.9639 ;...
+23.1438 -75.2077 ;...
+23.3547 -75.1494 ;...
+23.6194 -75.3314 ;...
+23.6925 -75.2971 ;...
+NaN NaN ;...
+22.739 -73.858 ;...
+22.561 -73.8082 ;...
+22.3159 -73.9569 ;...
+22.2508 -74.1654 ;...
+22.316 -74.1499 ;...
+22.3806 -74.0364 ;...
+22.4704 -74.0163 ;...
+22.56 -73.8947 ;...
+22.7342 -74.2732 ;...
+22.8546 -74.3477 ;...
+22.6966 -74.027 ;...
+22.739 -73.858 ;...
+NaN NaN ;...
+40.58 -74 ;...
+40.83 -73.92 ;...
+40.92 -73.33 ;...
+41 -72.83 ;...
+41.17 -72.33 ;...
+41 -72 ;...
+40.75 -72.67 ;...
+40.67 -73.33 ;...
+40.58 -74 ;...
+NaN NaN ;...
+49.83 -64.42 ;...
+49.83 -63.75 ;...
+49.75 -63.08 ;...
+49.58 -62.5 ;...
+49.42 -62 ;...
+49.08 -61.67 ;...
+49.08 -62.25 ;...
+49.17 -62.92 ;...
+49.33 -63.5 ;...
+49.58 -63.75 ;...
+49.83 -64.42 ;...
+NaN NaN ;...
+46.67 -64.25 ;...
+46.92 -64 ;...
+46.58 -63.92 ;...
+46.42 -63.33 ;...
+46.42 -62.75 ;...
+46.5 -62 ;...
+46.25 -62.5 ;...
+45.92 -62.5 ;...
+46.17 -63.33 ;...
+46.42 -63.92 ;...
+46.67 -64.25 ;...
+NaN NaN ;...
+62.17 -83.67 ;...
+62.58 -83.33 ;...
+62.75 -82.58 ;...
+62.75 -81.92 ;...
+62.67 -81.92 ;...
+62.25 -82.83 ;...
+62.17 -83.67 ;...
+NaN NaN ;...
+62.33 -80.08 ;...
+62.33 -79.25 ;...
+61.92 -79.33 ;...
+61.5 -79.75 ;...
+61.75 -80.17 ;...
+62.33 -80.08 ;...
+NaN NaN ;...
+56.25 -80 ;...
+56.58 -79.17 ;...
+56.33 -78.92 ;...
+55.75 -79.17 ;...
+56.17 -79.17 ;...
+55.92 -79.5 ;...
+55.9203 -79.645 ;...
+55.9203 -79.79 ;...
+55.9203 -79.935 ;...
+55.92 -80.08 ;...
+56.0228 -79.9163 ;...
+56.1254 -79.7518 ;...
+56.2278 -79.5863 ;...
+56.33 -79.42 ;...
+56.25 -80 ;...
+NaN NaN ;...
+53 -82 ;...
+53.17 -81.42 ;...
+53.08 -81 ;...
+52.67 -80.75 ;...
+52.83 -81.42 ;...
+53 -82 ;...
+NaN NaN ;...
+47.92 -59.33 ;...
+48.17 -58.92 ;...
+48.5 -58.42 ;...
+48.58 -58.75 ;...
+49 -58.42 ;...
+49.5 -57.92 ;...
+50.08 -57.67 ;...
+50.58 -57.25 ;...
+51 -57 ;...
+51.33 -56.67 ;...
+51.58 -56 ;...
+51.58 -55.5 ;...
+51.08 -55.75 ;...
+50.58 -56.17 ;...
+50.25 -56.5 ;...
+49.75 -56.83 ;...
+50.08 -56.08 ;...
+49.92 -55.5 ;...
+49.67 -55.92 ;...
+49.33 -55.33 ;...
+49.42 -54.67 ;...
+49.42 -54 ;...
+49.42 -54 ;...
+49.25 -53.5 ;...
+48.83 -54 ;...
+48.5 -53.75 ;...
+48.58 -53 ;...
+48.08 -53.75 ;...
+47.75 -53.92 ;...
+47.58 -53.67 ;...
+48 -53.33 ;...
+48.08 -53 ;...
+47.58 -53.25 ;...
+47.67 -52.75 ;...
+47.17 -52.92 ;...
+46.67 -53.08 ;...
+46.67 -53.67 ;...
+47.08 -53.67 ;...
+46.83 -54.17 ;...
+47.33 -53.92 ;...
+47.42 -54.42 ;...
+47.33 -54.83 ;...
+46.83 -55.33 ;...
+46.92 -55.92 ;...
+47.17 -55.42 ;...
+47.5 -55.42 ;...
+47.58 -56.08 ;...
+47.58 -56.83 ;...
+47.58 -57.58 ;...
+47.67 -58.42 ;...
+47.5 -59.17 ;...
+47.92 -59.33 ;...
+NaN NaN ;...
+63.67 -87.17 ;...
+64.08 -86.17 ;...
+64.67 -86.42 ;...
+65.25 -86.25 ;...
+65.67 -86 ;...
+66.08 -85 ;...
+65.58 -84.67 ;...
+65.17 -83.67 ;...
+64.83 -82.5 ;...
+64.5 -81.75 ;...
+64.08 -81.25 ;...
+63.83 -80.25 ;...
+63.42 -81.17 ;...
+63.75 -82 ;...
+64 -82.83 ;...
+64 -83.58 ;...
+63.67 -84.17 ;...
+63.25 -84.83 ;...
+63.17 -85.75 ;...
+63.67 -85.83 ;...
+63.67 -87.17 ;...
+NaN NaN ;...
+67.67 -77.25 ;...
+68.25 -76.5 ;...
+68.25 -75.33 ;...
+67.92 -74.83 ;...
+67.5 -75 ;...
+67.25 -75.67 ;...
+67.17 -76.83 ;...
+67.67 -77.25 ;...
+NaN NaN ;...
+71.75 -90 ;...
+72.33 -90 ;...
+72.75 -89.58 ;...
+73.17 -89.08 ;...
+73.58 -88 ;...
+73.83 -87 ;...
+73.83 -85.42 ;...
+73.8234 -85.0645 ;...
+73.8162 -84.7094 ;...
+73.8084 -84.3545 ;...
+73.8 -84 ;...
+73.7335 -84.3795 ;...
+73.6663 -84.756 ;...
+73.5985 -85.1295 ;...
+73.53 -85.5 ;...
+73.25 -86.08 ;...
+72.83 -86.5 ;...
+72.42 -86.42 ;...
+72 -86.33 ;...
+71.67 -85.33 ;...
+72.08 -85.67 ;...
+72.5 -85.58 ;...
+73 -85.5 ;...
+73.42 -84.33 ;...
+73.75 -82.92 ;...
+73.83 -81.67 ;...
+73.58 -81.17 ;...
+73.13 -81.5 ;...
+72.67 -80.83 ;...
+72.17 -81.17 ;...
+72.17 -81.17 ;...
+72.47 -80.17 ;...
+72.47 -79 ;...
+72.8 -77.83 ;...
+72.7 -76.83 ;...
+72.5 -75.75 ;...
+72.58 -74.33 ;...
+72.25 -74 ;...
+71.75 -74 ;...
+71.67 -72.42 ;...
+71.42 -71.25 ;...
+70.83 -69.75 ;...
+70.5 -68.33 ;...
+70.3754 -68.1187 ;...
+70.2505 -67.91 ;...
+70.1254 -67.7037 ;...
+70 -67.5 ;...
+69.8951 -67.6066 ;...
+69.7901 -67.7121 ;...
+69.6851 -67.8166 ;...
+69.58 -67.92 ;...
+69.4782 -67.6233 ;...
+69.376 -67.3294 ;...
+69.2732 -67.0383 ;...
+69.17 -66.75 ;...
+69.1285 -67.0844 ;...
+69.0863 -67.4176 ;...
+69.0435 -67.7494 ;...
+69 -68.08 ;...
+68.5 -68.08 ;...
+68.08 -66.75 ;...
+68.08 -65.17 ;...
+67.67 -64.25 ;...
+67.33 -63.17 ;...
+67 -62.17 ;...
+66.67 -61.42 ;...
+66.08 -62.08 ;...
+65.67 -62.33 ;...
+65.5 -63.5 ;...
+65 -63.83 ;...
+65.33 -65 ;...
+65.92 -65.5 ;...
+66.42 -67.17 ;...
+66.42 -67.17 ;...
+65.75 -67.42 ;...
+65.25 -66.83 ;...
+64.83 -65.75 ;...
+64.5 -65.08 ;...
+64 -65 ;...
+63.58 -64.25 ;...
+62.92 -64.5 ;...
+62.92 -65.75 ;...
+63.25 -66.83 ;...
+63.58 -67.75 ;...
+63.83 -68.92 ;...
+63.42 -68.5 ;...
+63.08 -67.75 ;...
+62.67 -66.92 ;...
+62.33 -66.17 ;...
+61.92 -66.17 ;...
+62 -67.17 ;...
+62.17 -68.08 ;...
+62.42 -69 ;...
+62.67 -69.33 ;...
+62.83 -70.33 ;...
+63 -71.08 ;...
+63.33 -71.92 ;...
+63.75 -71.92 ;...
+64 -72.75 ;...
+64.17 -73.67 ;...
+64.42 -74.67 ;...
+64.5 -75.75 ;...
+64.25 -76.5 ;...
+64.25 -78 ;...
+64.25 -78 ;...
+64.67 -78.58 ;...
+65.08 -78.25 ;...
+65.17 -77.67 ;...
+65.5 -77.5 ;...
+65.33 -76 ;...
+65.33 -74.67 ;...
+65.67 -73.67 ;...
+66.17 -74.58 ;...
+66.5 -73.75 ;...
+66.92 -73 ;...
+67.0028 -72.8342 ;...
+67.0854 -72.6673 ;...
+67.1678 -72.4992 ;...
+67.25 -72.33 ;...
+67.3551 -72.4139 ;...
+67.4601 -72.4985 ;...
+67.5651 -72.5839 ;...
+67.67 -72.67 ;...
+68.17 -73 ;...
+68.5 -74.25 ;...
+69 -75 ;...
+68.67 -76.17 ;...
+69.33 -75.83 ;...
+69.83 -77.5 ;...
+70.25 -78.08 ;...
+69.75 -79 ;...
+69.58 -79.83 ;...
+70 -81.33 ;...
+69.92 -82.33 ;...
+70.08 -84.33 ;...
+70.08 -85.75 ;...
+70.42 -87.33 ;...
+70.83 -88.25 ;...
+71.33 -89 ;...
+71.75 -90 ;...
+NaN NaN ;...
+73.75 -80.5 ;...
+73.83 -79.25 ;...
+73.78 -77.83 ;...
+73.58 -76.75 ;...
+73.17 -76.42 ;...
+72.87 -76.58 ;...
+72.98 -78.17 ;...
+72.83 -79.17 ;...
+72.98 -80.5 ;...
+73.42 -80.83 ;...
+73.75 -80.5 ;...
+NaN NaN ;...
+75 -96.58 ;...
+75.55 -95.67 ;...
+75.55 -94.5 ;...
+75.2 -93.75 ;...
+74.67 -93.75 ;...
+74.82 -95.33 ;...
+75 -96.58 ;...
+NaN NaN ;...
+77.03 -96.17 ;...
+76.92 -94.33 ;...
+76.65 -93 ;...
+76.65 -91.25 ;...
+76.37 -90.75 ;...
+76.3204 -90.4771 ;...
+76.2706 -90.2061 ;...
+76.2204 -89.9371 ;...
+76.17 -89.67 ;...
+76.1226 -89.7963 ;...
+76.0751 -89.9217 ;...
+76.0276 -90.0463 ;...
+75.98 -90.17 ;...
+75.9108 -89.8098 ;...
+75.841 -89.4531 ;...
+75.7708 -89.0999 ;...
+75.7 -88.75 ;...
+75.58 -86.75 ;...
+75.82 -84.83 ;...
+75.72 -83.08 ;...
+75.72 -81.58 ;...
+75.47 -79.83 ;...
+75.08 -79.83 ;...
+74.67 -80.42 ;...
+74.5 -82.17 ;...
+74.75 -83.67 ;...
+74.42 -84.75 ;...
+74.42 -86.5 ;...
+74.47 -88.17 ;...
+74.53 -90 ;...
+74.75 -92 ;...
+75.25 -92.17 ;...
+75.67 -91.83 ;...
+76.08 -92.17 ;...
+76.33 -93.25 ;...
+76.25 -95.08 ;...
+76.67 -96.25 ;...
+77.03 -96.17 ;...
+NaN NaN ;...
+80 -95 ;...
+80.45 -95 ;...
+80.8 -93.75 ;...
+81.25 -93.58 ;...
+81.25 -92 ;...
+80.8 -91.33 ;...
+80.5 -90.25 ;...
+80.42 -88 ;...
+80.3577 -87.7885 ;...
+80.2952 -87.5797 ;...
+80.2327 -87.3736 ;...
+80.17 -87.17 ;...
+80.0651 -87.2989 ;...
+79.9601 -87.4252 ;...
+79.8551 -87.5488 ;...
+79.75 -87.67 ;...
+79.7209 -87.2287 ;...
+79.6912 -86.79 ;...
+79.6609 -86.3537 ;...
+79.63 -85.92 ;...
+79.25 -85.92 ;...
+79 -87.25 ;...
+78.53 -88.08 ;...
+78.17 -89.25 ;...
+78.17 -92 ;...
+78.53 -93.33 ;...
+78.98 -93.67 ;...
+79.33 -93.17 ;...
+79.72 -93.83 ;...
+80 -95 ;...
+NaN NaN ;...
+81.5 -90.5 ;...
+81.92 -89 ;...
+81.92 -86.67 ;...
+81.92 -86.67 ;...
+82.13 -86.67 ;...
+82.33 -85 ;...
+82.58 -82.58 ;...
+82.92 -80.75 ;...
+83.05 -76.83 ;...
+82.98 -74.5 ;...
+83.08 -72.5 ;...
+83.08 -69.75 ;...
+82.92 -67.17 ;...
+82.8 -64.42 ;...
+82.55 -62.92 ;...
+82.42 -61.17 ;...
+82.2 -61.17 ;...
+81.95 -62.5 ;...
+81.67 -64.25 ;...
+81.42 -64.33 ;...
+80.95 -66.75 ;...
+80.58 -68.83 ;...
+80.2 -70 ;...
+79.72 -71.5 ;...
+79.5 -74 ;...
+79 -75 ;...
+78.5 -75 ;...
+78 -75.83 ;...
+77.92 -78 ;...
+77.58 -77.67 ;...
+77.25 -79.25 ;...
+76.83 -77.75 ;...
+76.42 -79 ;...
+76.17 -81 ;...
+76.17 -81 ;...
+76.5 -81.58 ;...
+76.33 -82.67 ;...
+76.42 -84 ;...
+76.25 -85.17 ;...
+76.33 -87.75 ;...
+76.42 -89.5 ;...
+76.83 -89.5 ;...
+77.13 -88.17 ;...
+77.1605 -87.8795 ;...
+77.1906 -87.5877 ;...
+77.2205 -87.2945 ;...
+77.25 -87 ;...
+77.2928 -87.2475 ;...
+77.3355 -87.4967 ;...
+77.3779 -87.7475 ;...
+77.42 -88 ;...
+77.83 -88 ;...
+77.8545 -87.3984 ;...
+77.8776 -86.7944 ;...
+77.8995 -86.1883 ;...
+77.92 -85.58 ;...
+77.9838 -86.0723 ;...
+78.0468 -86.5697 ;...
+78.1088 -87.0723 ;...
+78.1242 -87.1987 ;...
+78.1395 -87.3255 ;...
+78.1548 -87.4526 ;...
+78.17 -87.58 ;...
+78.2065 -87.4858 ;...
+78.243 -87.3909 ;...
+78.2794 -87.2955 ;...
+78.3158 -87.1995 ;...
+78.4611 -86.8096 ;...
+78.6058 -86.4099 ;...
+78.75 -86 ;...
+78.7806 -85.6478 ;...
+78.8108 -85.2938 ;...
+78.8406 -84.9378 ;...
+78.87 -84.58 ;...
+78.9651 -84.7237 ;...
+79.0601 -84.8699 ;...
+79.1551 -85.0187 ;...
+79.25 -85.17 ;...
+79.7 -85.17 ;...
+79.87 -86.67 ;...
+80.42 -85.83 ;...
+80.25 -83.67 ;...
+80.47 -82.5 ;...
+80.63 -79.58 ;...
+80.92 -77.67 ;...
+80.8 -80.67 ;...
+80.8 -83.5 ;...
+80.67 -85.5 ;...
+80.67 -88.25 ;...
+81 -90 ;...
+81.5 -90.5 ;...
+NaN NaN ;...
+79.9 -99.5 ;...
+80.17 -98.83 ;...
+79.75 -97.67 ;...
+79.9 -99.5 ;...
+NaN NaN ;...
+79.2 -106.33 ;...
+79.38 -105.67 ;...
+79.38 -104.25 ;...
+79.2 -103 ;...
+79.2 -101.42 ;...
+78.88 -100 ;...
+78.48 -100 ;...
+78.13 -99.58 ;...
+78.08 -98.58 ;...
+78.48 -98.17 ;...
+78.9 -98 ;...
+78.55 -95.83 ;...
+78.3 -95.42 ;...
+78 -95.83 ;...
+77.97 -97.17 ;...
+77.92 -98.83 ;...
+77.75 -100 ;...
+78.17 -100.83 ;...
+78.17 -102.17 ;...
+78.3 -104.17 ;...
+78.53 -105 ;...
+78.83 -104.17 ;...
+79 -105.92 ;...
+79.2 -106.33 ;...
+NaN NaN ;...
+77.67 -97 ;...
+77.83 -95 ;...
+77.8 -92.5 ;...
+77.57 -92.33 ;...
+77.35 -93.17 ;...
+77.47 -95.17 ;...
+77.67 -97 ;...
+NaN NaN ;...
+77.67 -105.67 ;...
+77.75 -104.75 ;...
+77.43 -104.58 ;...
+77.08 -104 ;...
+77.18 -105.33 ;...
+77.67 -105.67 ;...
+NaN NaN ;...
+76.58 -105 ;...
+76.63 -104 ;...
+76.37 -103 ;...
+76 -102 ;...
+76.42 -101.92 ;...
+76.7 -101.25 ;...
+76.67 -99.5 ;...
+76.38 -98 ;...
+76 -98 ;...
+75.58 -97.83 ;...
+75.07 -98.33 ;...
+75.07 -100.58 ;...
+75.63 -101.5 ;...
+75.42 -103 ;...
+75.83 -104 ;...
+76.17 -104.25 ;...
+76.58 -105 ;...
+NaN NaN ;...
+75.12 -104.75 ;...
+75.47 -104.25 ;...
+75.13 -103.58 ;...
+75.12 -104.75 ;...
+NaN NaN ;...
+72.92 -102.92 ;...
+73.07 -102 ;...
+73.03 -100.5 ;...
+73.5 -101.42 ;...
+73.83 -101.25 ;...
+74 -100.17 ;...
+73.92 -98.83 ;...
+74.13 -97.75 ;...
+74.0052 -97.5787 ;...
+73.8803 -97.4099 ;...
+73.7552 -97.2437 ;...
+73.63 -97.08 ;...
+73.5155 -97.3351 ;...
+73.4006 -97.5867 ;...
+73.2854 -97.835 ;...
+73.17 -98.08 ;...
+73.1234 -97.7246 ;...
+73.0762 -97.3712 ;...
+73.0284 -97.0196 ;...
+72.98 -96.67 ;...
+72.42 -96.25 ;...
+71.83 -96.42 ;...
+71.67 -97.83 ;...
+71.33 -99 ;...
+71.83 -99.92 ;...
+72.25 -100.92 ;...
+72.25 -102 ;...
+72.63 -102.33 ;...
+72.92 -102.92 ;...
+NaN NaN ;...
+78.25 -113.75 ;...
+78.5 -113 ;...
+78.58 -110.75 ;...
+78.38 -108.92 ;...
+78.2 -109.58 ;...
+78.17 -111 ;...
+78.3 -112 ;...
+78.25 -113.75 ;...
+NaN NaN ;...
+77.92 -115.5 ;...
+78.08 -114.25 ;...
+77.75 -114.08 ;...
+77.7 -114.83 ;...
+77.92 -115.5 ;...
+NaN NaN ;...
+77.75 -113.58 ;...
+77.87 -112.25 ;...
+78.02 -111.08 ;...
+78.0229 -110.81 ;...
+78.0255 -110.54 ;...
+78.0279 -110.27 ;...
+78.03 -110 ;...
+77.9175 -110.044 ;...
+77.805 -110.087 ;...
+77.6925 -110.129 ;...
+77.58 -110.17 ;...
+77.33 -111.75 ;...
+77.42 -113.25 ;...
+77.75 -113.58 ;...
+NaN NaN ;...
+75.58 -119.5 ;...
+75.9 -118.92 ;...
+76.13 -117.75 ;...
+75.75 -118.08 ;...
+75.48 -118.5 ;...
+75.58 -119.5 ;...
+NaN NaN ;...
+76.25 -124.17 ;...
+76.5 -122.75 ;...
+76.88 -121.67 ;...
+77.15 -120.5 ;...
+77.33 -119.33 ;...
+77.3 -117.67 ;...
+77.58 -116.75 ;...
+77.33 -115.5 ;...
+77.08 -116.58 ;...
+76.72 -116.17 ;...
+76.3 -117.5 ;...
+76.5 -118.92 ;...
+76.17 -119.5 ;...
+75.83 -120.58 ;...
+75.92 -121.92 ;...
+75.87 -123.08 ;...
+76.25 -124.17 ;...
+NaN NaN ;...
+75.25 -117.75 ;...
+75.67 -117.17 ;...
+75.67 -117.17 ;...
+76.13 -116.42 ;...
+76.47 -115.75 ;...
+76.42 -114.33 ;...
+76.17 -112.75 ;...
+75.92 -112 ;...
+75.57 -111.42 ;...
+75.5541 -110.918 ;...
+75.5371 -110.418 ;...
+75.5191 -109.918 ;...
+75.5 -109.42 ;...
+75.5827 -109.582 ;...
+75.6652 -109.746 ;...
+75.7477 -109.912 ;...
+75.83 -110.08 ;...
+75.8975 -110.018 ;...
+75.965 -109.956 ;...
+76.0325 -109.893 ;...
+76.1 -109.83 ;...
+76.1378 -110.058 ;...
+76.1754 -110.288 ;...
+76.2128 -110.518 ;...
+76.25 -110.75 ;...
+76.3327 -110.566 ;...
+76.4153 -110.379 ;...
+76.4977 -110.191 ;...
+76.58 -110 ;...
+76.8 -109.17 ;...
+76.33 -108.58 ;...
+76.02 -107 ;...
+75.87 -105.67 ;...
+75.47 -106 ;...
+75.03 -106.17 ;...
+74.95 -107.42 ;...
+75 -109.5 ;...
+74.8 -110.67 ;...
+74.53 -111.67 ;...
+74.42 -112.75 ;...
+74.5 -114 ;...
+74.72 -114.5 ;...
+74.92 -113.25 ;...
+75.25 -114.25 ;...
+74.97 -115.25 ;...
+75.13 -116.33 ;...
+75.25 -117.75 ;...
+NaN NaN ;...
+71.6 -119 ;...
+72.03 -118.83 ;...
+72.3 -118 ;...
+72.58 -118 ;...
+72.83 -117 ;...
+73.02 -115.75 ;...
+73.3 -114.83 ;...
+73.3 -113.67 ;...
+72.72 -113.25 ;...
+73.08 -111.92 ;...
+72.85 -110.83 ;...
+73 -109.58 ;...
+72.67 -108 ;...
+73.17 -107.42 ;...
+73.67 -106.58 ;...
+73.7 -105 ;...
+73.42 -104.17 ;...
+73.05 -104.67 ;...
+72.58 -105.5 ;...
+72.17 -105.17 ;...
+71.63 -104.58 ;...
+71.63 -104.58 ;...
+71.05 -104.75 ;...
+70.67 -103.75 ;...
+70.4 -102.33 ;...
+70.22 -100.67 ;...
+69.75 -100.83 ;...
+69.67 -102 ;...
+69.3 -101.5 ;...
+69 -102.58 ;...
+68.87 -103.83 ;...
+69.15 -105.08 ;...
+69.5 -106 ;...
+69.17 -107.08 ;...
+69 -108.42 ;...
+68.75 -110 ;...
+68.65 -111.67 ;...
+68.58 -113.08 ;...
+69.25 -113.75 ;...
+69.25 -115.17 ;...
+69.5 -116.42 ;...
+70.12 -116.75 ;...
+70.17 -115.5 ;...
+70.17 -114 ;...
+70.42 -112.67 ;...
+70.7 -114 ;...
+70.67 -115.42 ;...
+70.75 -117.08 ;...
+71.03 -118.08 ;...
+71.37 -117.75 ;...
+71.6 -119 ;...
+NaN NaN ;...
+72.08 -125.25 ;...
+72.5 -124.67 ;...
+72.98 -124 ;...
+73.5 -123.75 ;...
+74.08 -124.42 ;...
+74.25 -122.67 ;...
+74.5 -121.17 ;...
+74.25 -119.42 ;...
+74.25 -117.58 ;...
+73.83 -116.58 ;...
+73.5 -115.08 ;...
+73.2 -116.5 ;...
+72.92 -117.83 ;...
+72.58 -119.25 ;...
+71.92 -119.67 ;...
+71.42 -120.58 ;...
+71.33 -121.75 ;...
+71.08 -123.08 ;...
+71.67 -123.83 ;...
+72.08 -125.25 ;...
+NaN NaN ;...
+55.3371 165.686 ;...
+55.372 166.229 ;...
+55.2209 166.175 ;...
+54.8975 166.649 ;...
+54.719 166.614 ;...
+54.748 166.475 ;...
+54.8704 166.443 ;...
+54.8618 166.298 ;...
+55.099 165.993 ;...
+55.1461 166.023 ;...
+55.3371 165.686 ;...
+NaN NaN ;...
+52.92 172.5 ;...
+53 172.83 ;...
+52.83 173.33 ;...
+52.75 172.92 ;...
+52.92 172.5 ;...
+NaN NaN ;...
+51.7133 -177.919 ;...
+51.7604 -178.231 ;...
+51.8868 -178.301 ;...
+51.9132 -178.178 ;...
+51.8661 -177.907 ;...
+51.7469 -177.804 ;...
+51.7133 -177.919 ;...
+NaN NaN ;...
+51.67 -176.92 ;...
+51.92 -176.75 ;...
+51.75 -176.33 ;...
+51.67 -176.92 ;...
+NaN NaN ;...
+52.0723 -174.186 ;...
+52.0907 -174.38 ;...
+52.2028 -174.547 ;...
+52.2718 -174.302 ;...
+52.4185 -174.348 ;...
+52.1743 -173.984 ;...
+52.0723 -174.186 ;...
+NaN NaN ;...
+52.75 -169.17 ;...
+53.5 -168.5 ;...
+53.58 -168 ;...
+53.33 -168 ;...
+53.17 -168.33 ;...
+52.75 -169.17 ;...
+NaN NaN ;...
+53.33 -167.67 ;...
+53.67 -167.17 ;...
+54 -167.17 ;...
+54 -166.42 ;...
+53.7 -166.42 ;...
+53.5 -167 ;...
+53.33 -167.67 ;...
+NaN NaN ;...
+63.67 -171.75 ;...
+63.67 -170.5 ;...
+63.42 -169.83 ;...
+63.33 -168.83 ;...
+63.08 -169.83 ;...
+63.42 -170.75 ;...
+63.33 -171.75 ;...
+63.67 -171.75 ;...
+NaN NaN ;...
+60.17 -167.42 ;...
+60.25 -166.75 ;...
+60.33 -166.08 ;...
+60.25 -165.5 ;...
+60 -165.5 ;...
+59.83 -166.17 ;...
+59.92 -166.83 ;...
+60.17 -167.42 ;...
+NaN NaN ;...
+57.25 -154.83 ;...
+57.75 -153.92 ;...
+58.08 -153.25 ;...
+58.42 -152.5 ;...
+58.17 -152.08 ;...
+58.1081 -152.311 ;...
+58.0458 -152.542 ;...
+57.9831 -152.771 ;...
+57.92 -153 ;...
+57.8977 -152.875 ;...
+57.8752 -152.75 ;...
+57.8527 -152.625 ;...
+57.83 -152.5 ;...
+57.5 -152.25 ;...
+57.33 -153.08 ;...
+57 -153.42 ;...
+56.75 -154.08 ;...
+57.25 -154.83 ;...
+NaN NaN ;...
+54.17 -133.08 ;...
+54.08 -132.33 ;...
+54.08 -131.67 ;...
+53.58 -132 ;...
+53.08 -131.67 ;...
+52.75 -131.83 ;...
+52.5 -131.33 ;...
+52 -131 ;...
+52.42 -131.83 ;...
+52.83 -132.25 ;...
+53.33 -132.5 ;...
+53.58 -133 ;...
+54.17 -133.08 ;...
+NaN NaN ;...
+50.67 -128.33 ;...
+50.83 -127.92 ;...
+50.58 -127.17 ;...
+50.42 -126.25 ;...
+50.25 -125.5 ;...
+49.75 -125 ;...
+49.33 -124.42 ;...
+48.92 -123.67 ;...
+48.5 -123.17 ;...
+48.33 -123.67 ;...
+48.5 -124.25 ;...
+48.75 -125 ;...
+49 -125 ;...
+49 -125.5 ;...
+49.25 -125.92 ;...
+49.42 -126.5 ;...
+49.67 -126.42 ;...
+49.92 -127.08 ;...
+50.17 -127.75 ;...
+50.67 -128.33 ;...
+NaN NaN ;...
+78.17 -73 ;...
+78.58 -72.33 ;...
+78.75 -70.33 ;...
+79.02 -68.67 ;...
+79.2 -65.67 ;...
+79.75 -64.5 ;...
+80.08 -65 ;...
+80.08 -65 ;...
+80.082 -65.6674 ;...
+80.0826 -66.335 ;...
+80.082 -67.0026 ;...
+80.08 -67.67 ;...
+80.1927 -67.4478 ;...
+80.3053 -67.2206 ;...
+80.4177 -66.988 ;...
+80.53 -66.75 ;...
+80.92 -65 ;...
+81.18 -63.75 ;...
+81.13 -61.83 ;...
+81.42 -61.25 ;...
+81.75 -62 ;...
+82.08 -59.33 ;...
+82.33 -54.5 ;...
+82 -51.25 ;...
+82.47 -50.5 ;...
+82.42 -47.33 ;...
+82.83 -46.5 ;...
+83.2 -44 ;...
+83.42 -39.5 ;...
+83.65 -36 ;...
+83.58 -31.5 ;...
+83.55 -27.33 ;...
+83.25 -24.83 ;...
+82.98 -24 ;...
+82.85 -21.67 ;...
+82.7806 -21.2404 ;...
+82.7108 -20.819 ;...
+82.6406 -20.4056 ;...
+82.57 -20 ;...
+82.5085 -20.5559 ;...
+82.4463 -21.1028 ;...
+82.3835 -21.6408 ;...
+82.32 -22.17 ;...
+82.22 -25.58 ;...
+82.2291 -26.2455 ;...
+82.2371 -26.9124 ;...
+82.2441 -27.5807 ;...
+82.25 -28.25 ;...
+82.2056 -28.6518 ;...
+82.1607 -29.049 ;...
+82.1155 -29.4417 ;...
+82.07 -29.83 ;...
+82.0745 -28.7054 ;...
+82.076 -27.58 ;...
+82.0745 -26.4546 ;...
+82.07 -25.33 ;...
+81.9725 -25.33 ;...
+81.875 -25.33 ;...
+81.7775 -25.33 ;...
+81.68 -25.33 ;...
+81.75 -23.67 ;...
+82.03 -23.67 ;...
+82.03 -23.67 ;...
+82.08 -21.67 ;...
+81.8 -21.33 ;...
+81.63 -22 ;...
+81.37 -23.67 ;...
+80.92 -24.83 ;...
+81.08 -22.5 ;...
+81.32 -20.75 ;...
+81.58 -19.5 ;...
+81.48 -18.17 ;...
+81.8 -16.42 ;...
+81.68 -13.25 ;...
+81.5906 -12.8424 ;...
+81.5008 -12.4434 ;...
+81.4106 -12.0527 ;...
+81.32 -11.67 ;...
+81.1793 -12.3669 ;...
+81.0374 -13.0419 ;...
+80.8943 -13.696 ;...
+80.75 -14.33 ;...
+80.42 -15.83 ;...
+79.97 -17.17 ;...
+79.53 -18.08 ;...
+79.13 -18.5 ;...
+78.83 -19.5 ;...
+78.42 -19.17 ;...
+77.92 -19.67 ;...
+77.58 -18.83 ;...
+77.08 -18.17 ;...
+76.75 -18.42 ;...
+76.92 -20.33 ;...
+76.67 -21.42 ;...
+76.3 -21.5 ;...
+76.2 -20 ;...
+75.7 -19.42 ;...
+75.2 -19.42 ;...
+74.67 -20.08 ;...
+74.67 -20.08 ;...
+74.67 -18.83 ;...
+74.25 -19.25 ;...
+74 -21.5 ;...
+73.83 -20.33 ;...
+73.47 -20.33 ;...
+73.42 -21.58 ;...
+73.25 -22.17 ;...
+73.2003 -22.3793 ;...
+73.1504 -22.5874 ;...
+73.1003 -22.7943 ;...
+73.05 -23 ;...
+73.018 -22.7486 ;...
+72.9856 -22.4981 ;...
+72.953 -22.2486 ;...
+72.92 -22 ;...
+72.42 -21.75 ;...
+72.08 -22 ;...
+72.3 -23.33 ;...
+72.5 -24.58 ;...
+72.17 -23.67 ;...
+71.83 -22.58 ;...
+71.42 -21.75 ;...
+71 -21.75 ;...
+70.5 -21.83 ;...
+70.42 -23.08 ;...
+70.58 -24 ;...
+71 -24.25 ;...
+71.33 -24.5 ;...
+71.17 -25.5 ;...
+70.67 -25.25 ;...
+70.42 -26.33 ;...
+70.17 -26.33 ;...
+70.42 -25.08 ;...
+70.17 -23.75 ;...
+70.1483 -23.4365 ;...
+70.1261 -23.1236 ;...
+70.1033 -22.8115 ;...
+70.08 -22.5 ;...
+69.9977 -22.6695 ;...
+69.9153 -22.8376 ;...
+69.8327 -23.0045 ;...
+69.75 -23.17 ;...
+69.75 -23.17 ;...
+69.42 -24.25 ;...
+69.08 -25.25 ;...
+68.83 -26.25 ;...
+68.58 -27.5 ;...
+68.42 -28.75 ;...
+68.17 -30 ;...
+68.17 -31.33 ;...
+67.92 -32.17 ;...
+67.5 -33 ;...
+67.08 -33.42 ;...
+66.67 -33.92 ;...
+66.33 -34.75 ;...
+66 -35.67 ;...
+65.67 -37 ;...
+65.67 -38.5 ;...
+65.5 -39.67 ;...
+65.08 -40 ;...
+65.17 -41 ;...
+64.5 -40.42 ;...
+64 -40.5 ;...
+63.5 -40.75 ;...
+63 -41.5 ;...
+62.67 -42.42 ;...
+62 -42 ;...
+61.5 -42.5 ;...
+61 -42.75 ;...
+60.5 -42.83 ;...
+60 -43.25 ;...
+59.83 -43.92 ;...
+60.17 -45.17 ;...
+60.17 -45.17 ;...
+60.58 -45.75 ;...
+60.75 -46.67 ;...
+60.83 -48 ;...
+61.42 -49 ;...
+61.92 -49.5 ;...
+62.5 -50.08 ;...
+62.83 -50.33 ;...
+63.58 -51.25 ;...
+64 -51.33 ;...
+64.33 -52 ;...
+64.92 -52 ;...
+65.5 -52.42 ;...
+66 -53.5 ;...
+66.5 -53.5 ;...
+67 -53.75 ;...
+67.58 -53.67 ;...
+68.17 -53.33 ;...
+68.58 -52.58 ;...
+68.58 -51 ;...
+69.33 -50.75 ;...
+69.92 -51.25 ;...
+69.47 -52.17 ;...
+69.3 -53.67 ;...
+69.67 -54.83 ;...
+70.25 -54.58 ;...
+70.83 -54.25 ;...
+70.8 -52.83 ;...
+70.5 -51.25 ;...
+71 -51.58 ;...
+71.17 -52.75 ;...
+71.17 -52.75 ;...
+71.75 -53.33 ;...
+71.42 -54 ;...
+71.42 -55.25 ;...
+71.68 -55.75 ;...
+72.17 -55.33 ;...
+72.58 -55.83 ;...
+73.17 -55.5 ;...
+73.58 -55.67 ;...
+74 -56.5 ;...
+74.5 -56.5 ;...
+74.97 -57.5 ;...
+75.32 -58.33 ;...
+75.67 -58.33 ;...
+75.82 -60 ;...
+76.17 -61.67 ;...
+76.17 -63.42 ;...
+76.13 -65.33 ;...
+75.92 -66.58 ;...
+76.13 -68.5 ;...
+76.38 -69.5 ;...
+76.6 -68.75 ;...
+76.8 -70.25 ;...
+77.02 -71.17 ;...
+77.25 -70 ;...
+77.35 -68.42 ;...
+77.58 -70 ;...
+77.92 -70.33 ;...
+77.87 -71.67 ;...
+78.17 -73 ;...
+NaN NaN ;...
+65.5 -24.33 ;...
+65.83 -23.75 ;...
+66.17 -23.67 ;...
+66.17 -23 ;...
+66.42 -23 ;...
+66.33 -22.25 ;...
+66 -21.5 ;...
+65.42 -21.42 ;...
+65.42 -21.42 ;...
+65.58 -20.33 ;...
+66.08 -20.25 ;...
+65.83 -19.67 ;...
+66.08 -19.25 ;...
+66.17 -18.25 ;...
+66 -17.58 ;...
+66.17 -16.83 ;...
+66.5 -16.25 ;...
+66.25 -15.67 ;...
+66.33 -14.75 ;...
+65.75 -14.67 ;...
+65.5 -13.67 ;...
+64.92 -13.67 ;...
+64.42 -14.5 ;...
+64.17 -15.83 ;...
+63.83 -16.75 ;...
+63.75 -17.67 ;...
+63.42 -18.75 ;...
+63.58 -20 ;...
+63.83 -21 ;...
+63.832 -21.4375 ;...
+63.8326 -21.875 ;...
+63.832 -22.3125 ;...
+63.83 -22.75 ;...
+63.9354 -22.5646 ;...
+64.0405 -22.3778 ;...
+64.1454 -22.1896 ;...
+64.25 -22 ;...
+64.3752 -22.143 ;...
+64.5003 -22.2873 ;...
+64.6252 -22.433 ;...
+64.75 -22.58 ;...
+64.75 -24 ;...
+65.08 -22.83 ;...
+65.1653 -22.6443 ;...
+65.2505 -22.4574 ;...
+65.3354 -22.2693 ;...
+65.42 -22.08 ;...
+65.4603 -22.2467 ;...
+65.5004 -22.414 ;...
+65.5403 -22.5817 ;...
+65.58 -22.75 ;...
+65.42 -23.58 ;...
+65.5 -24.33 ;...
+NaN NaN ;...
+70.8948 -9.05404 ;...
+71.0174 -8.50616 ;...
+71.1505 -8.30805 ;...
+71.1521 -7.9249 ;...
+70.9907 -8.005 ;...
+70.9223 -8.25562 ;...
+70.9071 -8.60836 ;...
+70.8065 -9.04743 ;...
+70.8948 -9.05404 ;...
+NaN NaN ;...
+75.4 -18.83 ;...
+75.05 -17.92 ;...
+75 -18.83 ;...
+75.4 -18.83 ;...
+NaN NaN ;...
+0 9.33 ;...
+0.5 9.33 ;...
+0.92 9.58 ;...
+1.08 9.25 ;...
+1.5 9.5 ;...
+1.92 9.67 ;...
+2.5 9.75 ;...
+3.08 9.92 ;...
+3.5 9.58 ;...
+3.92 9.33 ;...
+4 8.92 ;...
+4.5 8.83 ;...
+4.5 8.42 ;...
+4.42 8 ;...
+4.42 7.5 ;...
+4.33 7 ;...
+4.25 6.5 ;...
+4.25 6 ;...
+4.58 5.42 ;...
+5.25 5.25 ;...
+5.92 5 ;...
+6.33 4.5 ;...
+6.5 4 ;...
+6.5 3.42 ;...
+6.42 2.83 ;...
+6.42 2.33 ;...
+6.33 1.83 ;...
+6.33 1.83 ;...
+6.17 1.25 ;...
+5.83 1 ;...
+5.83 0.33 ;...
+5.5 -0.25 ;...
+5.25 -0.67 ;...
+5.17 -1.08 ;...
+5 -1.58 ;...
+4.75 -2 ;...
+5 -2.58 ;...
+5.08 -3.08 ;...
+5.25 -3.83 ;...
+5.25 -4.33 ;...
+5.17 -4.75 ;...
+5.08 -5.33 ;...
+5 -5.92 ;...
+4.67 -6.33 ;...
+4.58 -6.92 ;...
+4.25 -7.42 ;...
+4.33 -7.83 ;...
+4.5 -8.33 ;...
+4.92 -8.92 ;...
+5.25 -9.42 ;...
+5.58 -9.83 ;...
+6.08 -10.33 ;...
+6.42 -11 ;...
+6.83 -11.42 ;...
+7.08 -11.92 ;...
+7.33 -12.42 ;...
+7.83 -12.92 ;...
+8.25 -13.08 ;...
+8.25 -13.08 ;...
+8.67 -13.17 ;...
+9.17 -13.25 ;...
+9.58 -13.58 ;...
+10 -14 ;...
+10.25 -14.42 ;...
+10.67 -14.67 ;...
+11.08 -15 ;...
+11.25 -15.42 ;...
+11.92 -15.5 ;...
+11.83 -15.92 ;...
+12.08 -16.33 ;...
+12.33 -16.75 ;...
+12.92 -16.75 ;...
+13.42 -16.67 ;...
+14 -16.75 ;...
+14.5 -17 ;...
+14.75 -17.42 ;...
+15 -17 ;...
+15.42 -16.75 ;...
+15.83 -16.42 ;...
+16.5 -16.42 ;...
+17 -16.25 ;...
+17.5 -16.08 ;...
+18 -16 ;...
+18.5 -16 ;...
+19 -16.17 ;...
+19.5 -16.5 ;...
+19.83 -16.17 ;...
+20.33 -16.17 ;...
+20.75 -16.58 ;...
+20.75 -16.58 ;...
+21.08 -17 ;...
+21.58 -16.92 ;...
+22.08 -16.83 ;...
+22.33 -16.5 ;...
+22.75 -16.25 ;...
+23.42 -16 ;...
+23.83 -15.75 ;...
+24.25 -15.42 ;...
+24.67 -15 ;...
+25.25 -14.75 ;...
+25.67 -14.58 ;...
+26.17 -14.42 ;...
+26.42 -14.17 ;...
+26.67 -13.67 ;...
+27 -13.42 ;...
+27.5 -13.25 ;...
+27.92 -12.92 ;...
+28 -12.17 ;...
+28.25 -11.5 ;...
+28.67 -11.17 ;...
+28.92 -10.58 ;...
+29.33 -10.17 ;...
+29.75 -9.83 ;...
+30.33 -9.58 ;...
+30.67 -9.83 ;...
+31.5 -9.83 ;...
+31.75 -9.5 ;...
+32.17 -9.25 ;...
+32.5 -9.25 ;...
+32.83 -8.83 ;...
+32.83 -8.83 ;...
+33.25 -8.5 ;...
+33.5 -8 ;...
+33.67 -7.42 ;...
+34 -6.83 ;...
+34.42 -6.58 ;...
+34.83 -6.33 ;...
+35.25 -6.17 ;...
+35.75 -5.92 ;...
+35.83 -5.42 ;...
+35.5 -5.17 ;...
+35.17 -4.5 ;...
+35.25 -4.08 ;...
+35.25 -3.42 ;...
+35.33 -3 ;...
+35.08 -2.42 ;...
+35.17 -1.92 ;...
+35.42 -1.25 ;...
+35.75 -1 ;...
+35.83 -0.5 ;...
+35.92 0 ;...
+36.25 0.5 ;...
+36.5 1 ;...
+36.58 1.75 ;...
+36.67 2.42 ;...
+36.83 2.92 ;...
+36.83 3.42 ;...
+36.92 3.83 ;...
+36.92 4.42 ;...
+36.83 5 ;...
+36.67 5.33 ;...
+36.67 5.33 ;...
+36.75 5.83 ;...
+37 6.25 ;...
+36.83 7 ;...
+37 7.42 ;...
+36.83 8.17 ;...
+36.92 8.67 ;...
+37.17 9.08 ;...
+37.25 9.58 ;...
+37.17 10.08 ;...
+36.67 10.33 ;...
+36.92 11 ;...
+36.5 10.75 ;...
+36.33 10.42 ;...
+35.75 10.58 ;...
+35.58 10.92 ;...
+35.17 11 ;...
+34.75 10.75 ;...
+34.42 10.33 ;...
+34.17 10 ;...
+33.75 10.08 ;...
+33.58 10.5 ;...
+33.67 10.92 ;...
+33.25 11.08 ;...
+33 11.67 ;...
+32.83 12.17 ;...
+32.83 12.83 ;...
+32.75 13.58 ;...
+32.67 14.17 ;...
+32.42 14.75 ;...
+32.25 15.33 ;...
+32.25 15.33 ;...
+31.75 15.33 ;...
+31.5 15.58 ;...
+31.25 16.08 ;...
+31.17 16.67 ;...
+31 17.5 ;...
+30.67 18.17 ;...
+30.33 18.67 ;...
+30.25 19.17 ;...
+30.42 19.58 ;...
+30.67 19.92 ;...
+31.08 20.08 ;...
+31.5 20 ;...
+31.5825 19.9576 ;...
+31.665 19.9152 ;...
+31.7475 19.8726 ;...
+31.83 19.83 ;...
+31.915 19.8923 ;...
+32.0001 19.9548 ;...
+32.085 20.0173 ;...
+32.17 20.08 ;...
+32.58 20.67 ;...
+32.75 21.25 ;...
+32.83 21.83 ;...
+32.75 22.5 ;...
+32.58 23.08 ;...
+32.17 23.17 ;...
+32.08 23.75 ;...
+31.92 24.33 ;...
+31.92 24.92 ;...
+31.5 25.17 ;...
+31.58 25.75 ;...
+31.5 26.33 ;...
+31.42 26.92 ;...
+31.25 27.33 ;...
+31.08 27.83 ;...
+31.08 28.42 ;...
+31.08 28.42 ;...
+30.83 29 ;...
+30.92 29.5 ;...
+31.17 30 ;...
+31.42 30.58 ;...
+31.5 31.17 ;...
+31.42 31.83 ;...
+31.08 31.75 ;...
+30.92 32.15 ;...
+30.6901 32.2532 ;...
+30.4602 32.356 ;...
+30.2301 32.4582 ;...
+30 32.56 ;...
+29.9575 32.5174 ;...
+29.9151 32.4549 ;...
+29.8725 32.3924 ;...
+29.83 32.33 ;...
+29.7275 32.3927 ;...
+29.6251 32.4553 ;...
+29.5225 32.5177 ;...
+29.42 32.58 ;...
+29 32.67 ;...
+28.58 32.92 ;...
+28.17 33.25 ;...
+27.83 33.58 ;...
+27.42 33.67 ;...
+27.08 33.92 ;...
+26.67 34 ;...
+26.17 34.33 ;...
+25.75 34.5 ;...
+25.25 34.75 ;...
+24.83 35 ;...
+24.33 35.25 ;...
+24 35.58 ;...
+23.58 35.5 ;...
+23.25 35.58 ;...
+22.75 35.83 ;...
+22.5 36.17 ;...
+22.25 36.58 ;...
+22.08 36.92 ;...
+21.5 36.92 ;...
+21.08 37.25 ;...
+20.75 37.25 ;...
+20.25 37.17 ;...
+19.67 37.25 ;...
+19.25 37.33 ;...
+18.67 37.58 ;...
+18.42 38.08 ;...
+18.42 38.08 ;...
+18 38.58 ;...
+17.58 38.83 ;...
+17.08 39.08 ;...
+16.5 39.17 ;...
+16 39.25 ;...
+15.58 39.5 ;...
+15.5 39.83 ;...
+15 40.08 ;...
+14.75 40.67 ;...
+14.58 41.17 ;...
+14.08 41.58 ;...
+13.67 42.08 ;...
+13.25 42.42 ;...
+12.83 42.83 ;...
+12.42 43.25 ;...
+11.92 43.42 ;...
+11.67 42.92 ;...
+11.42 43.25 ;...
+11.08 43.58 ;...
+10.67 43.92 ;...
+10.42 44.25 ;...
+10.33 44.75 ;...
+10.5 45.25 ;...
+10.75 45.75 ;...
+10.67 46.33 ;...
+10.75 46.83 ;...
+11.08 47.42 ;...
+11.08 48 ;...
+11.25 48.5 ;...
+11.17 49 ;...
+11.17 49 ;...
+11.33 49.58 ;...
+11.5 50.17 ;...
+11.92 50.58 ;...
+11.75 51.17 ;...
+11 51.08 ;...
+10.42 51 ;...
+10 50.75 ;...
+9.42 50.75 ;...
+8.92 50.42 ;...
+8.5 50.17 ;...
+8.17 49.92 ;...
+7.67 49.75 ;...
+7.25 49.5 ;...
+6.75 49.25 ;...
+6.33 49.08 ;...
+5.75 48.92 ;...
+5.42 48.5 ;...
+5.08 48.17 ;...
+4.58 47.92 ;...
+4.17 47.67 ;...
+3.75 47.25 ;...
+3.42 46.92 ;...
+3 46.5 ;...
+2.58 46.17 ;...
+2.25 45.75 ;...
+1.92 45.33 ;...
+1.75 44.83 ;...
+1.42 44.42 ;...
+1 44 ;...
+0.75 43.58 ;...
+0.75 43.58 ;...
+0.42 43.17 ;...
+0 42.92 ;...
+-0.42 42.42 ;...
+-0.92 42.08 ;...
+-1.25 41.75 ;...
+-1.75 41.5 ;...
+-2.08 41.08 ;...
+-2.5 40.67 ;...
+-2.42 40.25 ;...
+-3 40.25 ;...
+-3.58 39.92 ;...
+-4.17 39.58 ;...
+-4.58 39.33 ;...
+-5.08 39.08 ;...
+-5.58 38.92 ;...
+-6.08 38.75 ;...
+-6.58 39.17 ;...
+-7.08 39.58 ;...
+-7.58 39.33 ;...
+-8.17 39.33 ;...
+-8.83 39.42 ;...
+-9.42 39.67 ;...
+-9.92 39.83 ;...
+-10.17 40.17 ;...
+-10.5 40.5 ;...
+-11 40.5 ;...
+-12.5 40.42 ;...
+-12 40.5 ;...
+-12.42 40.5 ;...
+-13 40.42 ;...
+-13 40.42 ;...
+-13.58 40.58 ;...
+-14.08 40.58 ;...
+-14.67 40.75 ;...
+-15.17 40.67 ;...
+-15.58 40.42 ;...
+-16 40.08 ;...
+-16.33 39.75 ;...
+-16.75 39.25 ;...
+-17.08 38.92 ;...
+-17.17 38.25 ;...
+-17.33 37.83 ;...
+-17.5 37.33 ;...
+-17.92 37 ;...
+-18.33 36.67 ;...
+-18.83 36.25 ;...
+-19 35.83 ;...
+-19.42 35.5 ;...
+-19.67 35.17 ;...
+-20 34.75 ;...
+-20.5 34.67 ;...
+-20.83 35 ;...
+-21.42 35.17 ;...
+-22.08 35.33 ;...
+-22.5 35.5 ;...
+-23.08 35.5 ;...
+-23.5 35.42 ;...
+-24 35.5 ;...
+-24.5 35.25 ;...
+-24.75 34.92 ;...
+-24.92 34.42 ;...
+-24.92 34.42 ;...
+-25.17 33.75 ;...
+-25.33 33.42 ;...
+-25.5 32.92 ;...
+-26.08 32.58 ;...
+-26.25 32.92 ;...
+-26.58 32.92 ;...
+-27 32.92 ;...
+-27.5 32.75 ;...
+-28.08 32.58 ;...
+-28.58 32.42 ;...
+-28.83 32 ;...
+-29.17 31.5 ;...
+-29.58 31.25 ;...
+-30 30.92 ;...
+-30.5 30.67 ;...
+-30.92 30.42 ;...
+-31.33 30 ;...
+-31.67 29.58 ;...
+-32 29.25 ;...
+-32.42 28.83 ;...
+-32.75 28.5 ;...
+-33 28.08 ;...
+-33.33 27.5 ;...
+-33.67 27.08 ;...
+-33.75 26.58 ;...
+-33.75 26 ;...
+-34 25.75 ;...
+-34 25.08 ;...
+-34.25 24.75 ;...
+-34.17 24.25 ;...
+-34.17 24.25 ;...
+-34 23.67 ;...
+-34.17 23.25 ;...
+-34.08 22.67 ;...
+-34.17 22.08 ;...
+-34.42 21.75 ;...
+-34.42 21.08 ;...
+-34.5 20.5 ;...
+-34.75 20.17 ;...
+-34.75 19.58 ;...
+-34.42 19.17 ;...
+-34.08 18.75 ;...
+-34.25 18.42 ;...
+-33.83 18.42 ;...
+-33.42 18.25 ;...
+-32.83 17.83 ;...
+-32.83 18.08 ;...
+-32.58 18.33 ;...
+-32 18.25 ;...
+-31.42 17.92 ;...
+-31 17.58 ;...
+-30.58 17.33 ;...
+-30 17.17 ;...
+-29.58 16.92 ;...
+-29 16.75 ;...
+-28.58 16.42 ;...
+-28.25 15.92 ;...
+-27.83 15.67 ;...
+-27.5 15.33 ;...
+-27 15.25 ;...
+-26.42 15.08 ;...
+-26.42 15.08 ;...
+-25.92 14.92 ;...
+-25.5 14.83 ;...
+-25 14.83 ;...
+-24.42 14.58 ;...
+-24 14.5 ;...
+-23.5 14.5 ;...
+-22.92 14.42 ;...
+-22.5 14.5 ;...
+-22.08 14.33 ;...
+-21.67 13.92 ;...
+-21.25 13.75 ;...
+-20.83 13.42 ;...
+-20.25 13.25 ;...
+-19.75 12.92 ;...
+-19.25 12.67 ;...
+-18.83 12.42 ;...
+-18.5 12.08 ;...
+-17.92 11.83 ;...
+-17.42 11.75 ;...
+-17 11.75 ;...
+-16.5 11.83 ;...
+-16 11.83 ;...
+-15.5 12.08 ;...
+-15 12.17 ;...
+-14.5 12.33 ;...
+-13.92 12.5 ;...
+-13.33 12.67 ;...
+-12.92 13 ;...
+-12.67 13.42 ;...
+-12.25 13.67 ;...
+-12.25 13.67 ;...
+-11.83 13.83 ;...
+-11.25 13.92 ;...
+-10.67 13.83 ;...
+-10.33 13.58 ;...
+-9.92 13.33 ;...
+-9.42 13.17 ;...
+-9.17 13 ;...
+-8.67 13.42 ;...
+-8.17 13.25 ;...
+-7.75 13 ;...
+-7.25 12.92 ;...
+-6.75 12.67 ;...
+-6.42 12.42 ;...
+-6 12.33 ;...
+-5.75 12.17 ;...
+-5.33 12.17 ;...
+-4.92 11.92 ;...
+-4.5 11.58 ;...
+-4.08 11.33 ;...
+-3.75 11 ;...
+-3.42 10.67 ;...
+-3.08 10.33 ;...
+-2.75 9.83 ;...
+-2.25 9.5 ;...
+-1.83 9.17 ;...
+-1.33 9 ;...
+-0.83 8.75 ;...
+-0.5 9.25 ;...
+0 9.33 ;...
+NaN NaN ;...
+30 32.58 ;...
+30.92 32.17 ;...
+31.25 32.33 ;...
+31 32.67 ;...
+31.08 33.25 ;...
+31.17 34 ;...
+31.5 34.33 ;...
+32 34.67 ;...
+32.5 34.83 ;...
+33 35 ;...
+33.5 35.25 ;...
+34.08 35.5 ;...
+34.5 35.83 ;...
+35 35.75 ;...
+35.67 35.75 ;...
+36 35.92 ;...
+36.33 35.75 ;...
+36.67 36.08 ;...
+36.92 36 ;...
+36.58 35.5 ;...
+36.58 35.17 ;...
+36.83 34.5 ;...
+36.5 34.08 ;...
+36.17 33.58 ;...
+36.17 33.58 ;...
+36.17 33.17 ;...
+36 32.75 ;...
+36.17 32.25 ;...
+36.5 32 ;...
+36.75 31.25 ;...
+36.83 30.67 ;...
+36.42 30.5 ;...
+36.17 29.75 ;...
+36.33 29.25 ;...
+36.75 28.58 ;...
+36.75 28 ;...
+37 28.17 ;...
+37 27.67 ;...
+37.0002 27.545 ;...
+37.0003 27.42 ;...
+37.0002 27.295 ;...
+37 27.17 ;...
+37.0826 27.2522 ;...
+37.1651 27.3346 ;...
+37.2476 27.4172 ;...
+37.33 27.5 ;...
+37.3926 27.3953 ;...
+37.4552 27.2904 ;...
+37.5176 27.1853 ;...
+37.58 27.08 ;...
+37.92 27.17 ;...
+38.08 26.75 ;...
+38.25 26.25 ;...
+38.58 26.33 ;...
+38.33 26.67 ;...
+38.42 26.92 ;...
+38.67 26.75 ;...
+39.25 26.67 ;...
+39.5 26.83 ;...
+39.42 26 ;...
+40 26.17 ;...
+40.25 26.17 ;...
+40.58 26.58 ;...
+40.5 26.08 ;...
+40.83 25.75 ;...
+40.92 25.17 ;...
+40.83 24.75 ;...
+40.92 24.25 ;...
+40.67 24 ;...
+40.67 23.5 ;...
+40.42 23.75 ;...
+40.17 23.58 ;...
+40.17 23.08 ;...
+40.58 22.58 ;...
+40 22.5 ;...
+39.58 22.83 ;...
+39.25 23.17 ;...
+38.92 23.33 ;...
+38.67 23.67 ;...
+38.5 24.08 ;...
+38.17 24.17 ;...
+38.17 24.17 ;...
+38 24.58 ;...
+38.17 24 ;...
+37.58 24 ;...
+37.92 23.5 ;...
+38 22.92 ;...
+38.33 22.58 ;...
+38.3 21.83 ;...
+38.33 21.08 ;...
+38.75 20.67 ;...
+39.25 20.33 ;...
+39.58 20.08 ;...
+40 19.75 ;...
+40.25 19.33 ;...
+40.75 19.33 ;...
+41.33 19.42 ;...
+41.75 19.5 ;...
+42 19 ;...
+42.33 18.5 ;...
+42.58 17.92 ;...
+43 17.33 ;...
+43.42 16.58 ;...
+43.5 16 ;...
+43.75 15.75 ;...
+44 15.17 ;...
+44.33 15.17 ;...
+44.58 14.92 ;...
+45 14.83 ;...
+45.33 14.17 ;...
+45 14.08 ;...
+44.83 13.75 ;...
+44.83 13.75 ;...
+45.08 13.5 ;...
+45.42 13.42 ;...
+45.67 13.58 ;...
+45.75 13.08 ;...
+45.5 12.58 ;...
+45.5 12.17 ;...
+45.17 12.08 ;...
+44.92 12.33 ;...
+44.58 12.17 ;...
+44.25 12.25 ;...
+43.92 12.67 ;...
+43.67 13.08 ;...
+43.42 13.58 ;...
+43 13.83 ;...
+42.58 13.92 ;...
+42.33 14.17 ;...
+42.08 14.67 ;...
+41.83 15.25 ;...
+41.83 16.08 ;...
+41.5 15.83 ;...
+41.33 16.33 ;...
+41.08 16.92 ;...
+40.83 17.42 ;...
+40.58 17.92 ;...
+40.17 18.42 ;...
+39.83 18.25 ;...
+39.92 17.92 ;...
+40.25 17.83 ;...
+40.33 17.33 ;...
+40.5 16.92 ;...
+40.5 16.92 ;...
+40.17 16.67 ;...
+39.67 16.5 ;...
+39.42 17 ;...
+39 17 ;...
+38.83 16.5 ;...
+38.42 16.5 ;...
+38.17 16.17 ;...
+37.92 16 ;...
+37.92 15.67 ;...
+38.17 15.67 ;...
+38.58 15.83 ;...
+38.83 16.08 ;...
+39.25 16 ;...
+39.67 15.75 ;...
+40 15.67 ;...
+40.17 15 ;...
+40.58 14.75 ;...
+40.67 14.42 ;...
+40.92 14 ;...
+41.25 13.58 ;...
+41.25 12.92 ;...
+41.5 12.42 ;...
+41.92 12 ;...
+42.33 11.5 ;...
+42.33 11 ;...
+42.67 10.92 ;...
+43.08 10.42 ;...
+43.5 10.17 ;...
+44 10.08 ;...
+44.08 9.5 ;...
+44.08 9.5 ;...
+44.25 9.17 ;...
+44.33 8.67 ;...
+44.17 8.25 ;...
+43.83 8 ;...
+43.67 7.42 ;...
+43.42 6.83 ;...
+43.08 6.42 ;...
+43 5.83 ;...
+43.25 5.25 ;...
+43.33 4.58 ;...
+43.5 4 ;...
+43.25 3.58 ;...
+43 3 ;...
+42.5 3.08 ;...
+41.92 3.17 ;...
+41.67 2.67 ;...
+41.33 2.08 ;...
+41.17 1.5 ;...
+41 1 ;...
+40.42 0.5 ;...
+39.92 0 ;...
+39.5 -0.33 ;...
+39.08 -0.25 ;...
+38.9977 -0.124564 ;...
+38.9153 0.000581242 ;...
+38.8327 0.125435 ;...
+38.75 0.25 ;...
+38.6678 0.1045 ;...
+38.5854 -0.0406663 ;...
+38.5028 -0.185499 ;...
+38.42 -0.33 ;...
+38.08 -0.67 ;...
+37.67 -0.83 ;...
+37.67 -1.33 ;...
+37.17 -1.83 ;...
+36.75 -2.17 ;...
+36.75 -2.17 ;...
+36.75 -2.75 ;...
+36.75 -3.33 ;...
+36.75 -3.92 ;...
+36.67 -4.5 ;...
+36.5 -5 ;...
+36.08 -5.5 ;...
+36.25 -6 ;...
+36.75 -6.33 ;...
+37 -6.5 ;...
+37.25 -7 ;...
+37.25 -7.42 ;...
+37.08 -7.83 ;...
+37.17 -8.33 ;...
+37.08 -8.92 ;...
+37.5 -8.75 ;...
+38 -8.75 ;...
+38.5 -8.67 ;...
+38.5 -9.08 ;...
+38.83 -9.33 ;...
+39.33 -9.25 ;...
+39.67 -8.92 ;...
+40.25 -8.83 ;...
+40.75 -8.67 ;...
+41.17 -8.58 ;...
+41.67 -8.75 ;...
+42 -8.83 ;...
+42.42 -8.75 ;...
+43 -9.17 ;...
+43.25 -8.92 ;...
+43.33 -8.33 ;...
+43.33 -8.33 ;...
+43.75 -8 ;...
+43.5 -7.25 ;...
+43.5 -6.58 ;...
+43.58 -5.83 ;...
+43.5 -5.08 ;...
+43.33 -4.42 ;...
+43.5 -3.58 ;...
+43.42 -2.75 ;...
+43.33 -2.17 ;...
+43.42 -1.67 ;...
+43.67 -1.42 ;...
+44.25 -1.25 ;...
+44.67 -1.17 ;...
+45.25 -1.08 ;...
+45.83 -1 ;...
+46.25 -1.25 ;...
+46.5 -1.75 ;...
+46.75 -2 ;...
+47 -1.92 ;...
+47.33 -2.33 ;...
+47.5 -2.5 ;...
+47.58 -3.17 ;...
+47.83 -3.75 ;...
+47.83 -4.25 ;...
+48 -4.67 ;...
+48.5 -4.75 ;...
+48.58 -4.17 ;...
+48.75 -3.67 ;...
+48.83 -3 ;...
+48.5 -2.67 ;...
+48.5 -2.67 ;...
+48.58 -2 ;...
+48.58 -1.42 ;...
+49.17 -1.58 ;...
+49.67 -1.92 ;...
+49.67 -1.33 ;...
+49.25 -1.08 ;...
+49.33 -0.58 ;...
+49.25 0 ;...
+49.67 0.17 ;...
+49.83 0.58 ;...
+49.92 1.08 ;...
+50.08 1.5 ;...
+50.5 1.58 ;...
+50.83 1.75 ;...
+51 2.33 ;...
+51.25 3 ;...
+51.42 3.58 ;...
+51.75 4 ;...
+52.08 4.33 ;...
+52.42 4.58 ;...
+52.83 4.75 ;...
+52.92 5.08 ;...
+52.42 5.08 ;...
+52.25 5.5 ;...
+52.5 5.92 ;...
+52.75 5.92 ;...
+52.83 5.5 ;...
+53.25 5.5 ;...
+53.42 6.08 ;...
+53.42 7 ;...
+53.42 7 ;...
+53.67 7.33 ;...
+53.67 8 ;...
+53.5 8.5 ;...
+53.83 8.58 ;...
+54.08 9 ;...
+54.33 8.67 ;...
+54.5 9 ;...
+54.83 8.67 ;...
+55.33 8.67 ;...
+55.58 8.17 ;...
+56 8.08 ;...
+56.5 8.08 ;...
+56.83 8.25 ;...
+57.08 8.67 ;...
+57.17 9.42 ;...
+57.5 9.83 ;...
+57.58 10.5 ;...
+57.25 10.5 ;...
+56.83 10.25 ;...
+56.5 10.25 ;...
+56.5 10.83 ;...
+56.17 10.83 ;...
+56 10.17 ;...
+55.5 9.75 ;...
+55 9.42 ;...
+54.75 10 ;...
+54.5 10 ;...
+54.25 10.92 ;...
+53.92 10.75 ;...
+53.92 11.33 ;...
+53.92 11.33 ;...
+54.17 11.83 ;...
+54.33 12.25 ;...
+54.42 12.92 ;...
+54.67 13.33 ;...
+54.33 13.67 ;...
+54.17 13.25 ;...
+54 13.83 ;...
+53.92 14.33 ;...
+54.17 15.17 ;...
+54.25 16 ;...
+54.58 16.67 ;...
+54.75 17.5 ;...
+54.83 18.33 ;...
+54.33 18.67 ;...
+54.33 19.25 ;...
+54.5 19.75 ;...
+54.92 20 ;...
+55.17 20.75 ;...
+55.67 21.08 ;...
+56.17 21 ;...
+56.75 21 ;...
+57.08 21.33 ;...
+57.58 21.67 ;...
+57.67 22.42 ;...
+57.42 22.92 ;...
+57.08 23.25 ;...
+57 23.83 ;...
+57.25 24.33 ;...
+57.75 24.33 ;...
+58.25 24.33 ;...
+58.25 24.33 ;...
+58.33 23.75 ;...
+58.75 23.5 ;...
+59.17 23.5 ;...
+59.42 24.17 ;...
+59.5 25 ;...
+59.58 25.92 ;...
+59.42 26.83 ;...
+59.42 27.92 ;...
+59.67 28.33 ;...
+60 29.17 ;...
+59.9582 29.421 ;...
+59.9159 29.6713 ;...
+59.8732 29.921 ;...
+59.83 30.17 ;...
+59.9351 30.066 ;...
+60.0402 29.9613 ;...
+60.1451 29.856 ;...
+60.25 29.75 ;...
+60.17 29.08 ;...
+60.58 28.33 ;...
+60.58 27.17 ;...
+60.42 26.25 ;...
+60.25 25.33 ;...
+60.08 24.42 ;...
+60 23.5 ;...
+60.17 22.58 ;...
+60.42 22.08 ;...
+60.67 21.33 ;...
+61 21.33 ;...
+61.5 21.67 ;...
+62 21.25 ;...
+62.5 21.17 ;...
+63 21.58 ;...
+63.42 22.42 ;...
+63.92 23.25 ;...
+64.33 24.08 ;...
+64.33 24.08 ;...
+64.83 24.67 ;...
+65 25.33 ;...
+65.5 25.33 ;...
+65.83 24.5 ;...
+65.75 23.83 ;...
+65.75 23.17 ;...
+65.83 22.42 ;...
+65.5 22 ;...
+65.33 21.42 ;...
+65.08 21.5 ;...
+64.75 21 ;...
+64.5 21.67 ;...
+64.17 21 ;...
+63.83 20.67 ;...
+63.58 19.83 ;...
+63.33 19.17 ;...
+63 18.42 ;...
+62.5 17.75 ;...
+62.25 17.42 ;...
+61.75 17.33 ;...
+61.25 17.17 ;...
+60.83 17.25 ;...
+60.5 17.92 ;...
+60.17 18.58 ;...
+59.83 18.83 ;...
+59.58 18.58 ;...
+59.33 18 ;...
+59.17 18.33 ;...
+58.92 17.58 ;...
+58.58 16.83 ;...
+58.58 16.83 ;...
+58.25 16.67 ;...
+57.75 16.67 ;...
+57.33 16.5 ;...
+56.75 16.33 ;...
+56.17 15.92 ;...
+56.17 15.33 ;...
+56.17 14.67 ;...
+55.75 14.17 ;...
+55.42 14.25 ;...
+55.42 13.67 ;...
+55.42 13 ;...
+55.75 13 ;...
+56.17 12.67 ;...
+56.5 12.92 ;...
+56.92 12.33 ;...
+57.42 12 ;...
+58 11.67 ;...
+58.5 11.33 ;...
+59.08 11.08 ;...
+59.33 10.58 ;...
+59 10.25 ;...
+59 9.75 ;...
+58.58 9 ;...
+58.25 8.5 ;...
+58 7.67 ;...
+58.08 6.83 ;...
+58.33 6 ;...
+58.58 5.58 ;...
+59 5.67 ;...
+59.33 6.17 ;...
+59.33 6.17 ;...
+59.25 5.42 ;...
+59.58 5.25 ;...
+59.92 5.58 ;...
+60.33 5.08 ;...
+60.75 5 ;...
+60.855 4.95792 ;...
+60.96 4.91556 ;...
+61.065 4.87292 ;...
+61.17 4.83 ;...
+61.2526 4.93417 ;...
+61.3352 5.03889 ;...
+61.4176 5.14417 ;...
+61.5 5.25 ;...
+61.5826 5.16816 ;...
+61.6651 5.08588 ;...
+61.7476 5.00316 ;...
+61.83 4.92 ;...
+61.9154 5.10593 ;...
+62.0005 5.29291 ;...
+62.0854 5.48093 ;...
+62.17 5.67 ;...
+62.5 6.33 ;...
+62.92 7 ;...
+63 8 ;...
+63.33 8.5 ;...
+63.75 8.92 ;...
+63.5 9.5 ;...
+63.92 10.08 ;...
+64.33 10.67 ;...
+64.92 11.33 ;...
+65.25 12.17 ;...
+65.75 12.58 ;...
+66.17 13 ;...
+66.58 13.33 ;...
+67 13.83 ;...
+67.42 14.67 ;...
+67.83 15 ;...
+68.33 16.17 ;...
+68.25 15 ;...
+68.17 14 ;...
+67.83 12.92 ;...
+68.25 13.67 ;...
+68.25 13.67 ;...
+68.2935 14.0006 ;...
+68.3363 14.3325 ;...
+68.3785 14.6656 ;...
+68.42 15 ;...
+68.4827 14.8562 ;...
+68.5452 14.7116 ;...
+68.6077 14.5662 ;...
+68.67 14.42 ;...
+68.733 14.6456 ;...
+68.7956 14.8724 ;...
+68.858 15.1006 ;...
+68.92 15.33 ;...
+69.25 16.17 ;...
+68.75 15.92 ;...
+69 16.58 ;...
+68.67 16.67 ;...
+68.753 16.895 ;...
+68.8356 17.1216 ;...
+68.918 17.35 ;...
+69 17.58 ;...
+69.0202 17.4354 ;...
+69.0402 17.2905 ;...
+69.0602 17.1454 ;...
+69.08 17 ;...
+69.42 17 ;...
+69.58 18.08 ;...
+70.08 18.92 ;...
+70.08 20.17 ;...
+70.25 21.5 ;...
+70.67 22.33 ;...
+70.92 23.33 ;...
+70.67 24.25 ;...
+71 24.83 ;...
+71.17 25.67 ;...
+70.58 25.58 ;...
+70.92 26.58 ;...
+70.5 26.83 ;...
+71.08 27.75 ;...
+70.92 29.08 ;...
+70.67 30.17 ;...
+70.42 31.17 ;...
+70.08 30 ;...
+70.1033 29.6885 ;...
+70.1261 29.3764 ;...
+70.1483 29.0635 ;...
+70.17 28.75 ;...
+70.0858 29.0663 ;...
+70.0011 29.3801 ;...
+69.9158 29.6913 ;...
+69.83 30 ;...
+69.75 31.25 ;...
+69.75 31.25 ;...
+69.92 32.08 ;...
+69.75 33.08 ;...
+69.42 33.08 ;...
+69.33 34.5 ;...
+69.17 35.83 ;...
+68.92 37 ;...
+68.58 38 ;...
+68.25 39 ;...
+68 40 ;...
+67.67 40.92 ;...
+67.25 41.25 ;...
+66.75 41.25 ;...
+66.42 40.58 ;...
+66.17 39.5 ;...
+66.08 38.33 ;...
+66.25 37.25 ;...
+66.33 36.25 ;...
+66.42 35.33 ;...
+66.67 34.33 ;...
+66.83 33.17 ;...
+66.9155 32.9424 ;...
+67.0007 32.7132 ;...
+67.0855 32.4824 ;...
+67.17 32.25 ;...
+67.0452 32.3972 ;...
+66.9203 32.543 ;...
+66.7952 32.6872 ;...
+66.67 32.83 ;...
+66.42 33.67 ;...
+66 34.67 ;...
+65.5 34.67 ;...
+65.08 34.67 ;...
+64.58 34.67 ;...
+64.33 35.75 ;...
+63.92 36.75 ;...
+63.92 38 ;...
+63.92 38 ;...
+64.42 38 ;...
+64.42 37.25 ;...
+64.83 36.5 ;...
+65.17 36.92 ;...
+65.08 37.75 ;...
+64.83 38.67 ;...
+64.67 39.5 ;...
+64.58 40.67 ;...
+65.08 40.17 ;...
+65.1852 40.0465 ;...
+65.2902 39.922 ;...
+65.3952 39.7965 ;...
+65.5 39.67 ;...
+65.5829 39.8755 ;...
+65.6656 40.0824 ;...
+65.7479 40.2905 ;...
+65.83 40.5 ;...
+66.08 41.5 ;...
+66.42 42.17 ;...
+66.33 43.17 ;...
+66.17 43.92 ;...
+66.5 44.5 ;...
+67 44.5 ;...
+67.17 43.83 ;...
+67.67 44.17 ;...
+68.25 44.17 ;...
+68.67 43.25 ;...
+68.5 44.5 ;...
+68.5 45.67 ;...
+68.25 46.25 ;...
+67.83 46.67 ;...
+67.75 45.5 ;...
+67.6452 45.3531 ;...
+67.5403 45.2074 ;...
+67.4352 45.0631 ;...
+67.33 44.92 ;...
+67.2905 45.1486 ;...
+67.2506 45.3765 ;...
+67.2105 45.6036 ;...
+67.17 45.83 ;...
+66.83 46.33 ;...
+66.83 47.33 ;...
+66.83 47.33 ;...
+67.08 48.17 ;...
+67.67 48.17 ;...
+67.83 49.08 ;...
+68.08 50.17 ;...
+68.33 51 ;...
+68.5 52.08 ;...
+68.75 53 ;...
+69 53.92 ;...
+68.5 54 ;...
+68.17 54.83 ;...
+68.5 55.17 ;...
+68.58 56.17 ;...
+68.5 57.25 ;...
+68.83 58.17 ;...
+69 59.08 ;...
+68.42 59.08 ;...
+68.33 59.83 ;...
+68.67 59.83 ;...
+68.75 60.5 ;...
+69 61.08 ;...
+69.33 60.5 ;...
+69.67 60.17 ;...
+69.92 59.17 ;...
+70.17 58.58 ;...
+70.5 59.08 ;...
+70.08 60.17 ;...
+69.75 61.75 ;...
+69.67 63 ;...
+69.33 64.08 ;...
+69.25 65.08 ;...
+69.25 65.08 ;...
+69 66 ;...
+68.83 67.08 ;...
+68.5 67.75 ;...
+68.25 68.58 ;...
+68.75 69.08 ;...
+69 68.33 ;...
+69.5 67.83 ;...
+69.67 66.92 ;...
+70 66.92 ;...
+70.42 67.33 ;...
+70.83 66.83 ;...
+71.25 67 ;...
+71.58 68.25 ;...
+72 68.75 ;...
+72.5 68.92 ;...
+72.92 69.5 ;...
+73.33 70.08 ;...
+73.373 70.3505 ;...
+73.4157 70.6223 ;...
+73.458 70.8955 ;...
+73.5 71.17 ;...
+73.4176 71.2968 ;...
+73.3351 71.4224 ;...
+73.2526 71.5468 ;...
+73.17 71.67 ;...
+73.1076 71.5841 ;...
+73.0451 71.4988 ;...
+72.9826 71.4141 ;...
+72.92 71.33 ;...
+72.8585 71.709 ;...
+72.7964 72.0853 ;...
+72.7335 72.459 ;...
+72.67 72.83 ;...
+72.585 72.83 ;...
+72.5 72.83 ;...
+72.415 72.83 ;...
+72.33 72.83 ;...
+72.2276 72.7258 ;...
+72.1251 72.6227 ;...
+72.0226 72.5208 ;...
+71.92 72.42 ;...
+71.8152 72.27 ;...
+71.7102 72.1217 ;...
+71.6052 71.9751 ;...
+71.5 71.83 ;...
+71.3954 72.0434 ;...
+71.2905 72.2545 ;...
+71.1853 72.4634 ;...
+71.08 72.67 ;...
+70.955 72.67 ;...
+70.83 72.67 ;...
+70.705 72.67 ;...
+70.58 72.67 ;...
+70.455 72.67 ;...
+70.33 72.67 ;...
+70.205 72.67 ;...
+70.08 72.67 ;...
+69.935 72.647 ;...
+69.79 72.6244 ;...
+69.645 72.602 ;...
+69.5 72.58 ;...
+69.375 72.58 ;...
+69.25 72.58 ;...
+69.125 72.58 ;...
+69 72.58 ;...
+68.8956 72.8336 ;...
+68.7907 73.0847 ;...
+68.6855 73.3335 ;...
+68.58 73.58 ;...
+68.58 73.58 ;...
+68.17 73.17 ;...
+67.75 73.17 ;...
+67.33 72.5 ;...
+67 71.83 ;...
+66.67 70.5 ;...
+66.25 71.58 ;...
+66.5 72.5 ;...
+66.83 73.5 ;...
+67.25 74 ;...
+67.67 74.83 ;...
+68.25 74.5 ;...
+68.75 74.58 ;...
+68.813 74.8081 ;...
+68.8756 75.0374 ;...
+68.938 75.268 ;...
+69 75.5 ;...
+69.0214 75.1061 ;...
+69.0418 74.7114 ;...
+69.0614 74.3161 ;...
+69.08 73.92 ;...
+69.67 73.67 ;...
+70.17 73.83 ;...
+70.58 74.33 ;...
+71 73.83 ;...
+71.33 73.17 ;...
+71.83 73.75 ;...
+72 74.67 ;...
+72.33 75.17 ;...
+72.67 75.08 ;...
+72.92 74.17 ;...
+73.08 74.5 ;...
+72.75 75.5 ;...
+72.25 75.75 ;...
+71.83 75.33 ;...
+71.33 75.33 ;...
+71.17 76.92 ;...
+71.17 76.92 ;...
+71 78.5 ;...
+71.25 78 ;...
+71.5 76.58 ;...
+71.92 76.08 ;...
+72.08 76.92 ;...
+71.83 77.75 ;...
+72 78.33 ;...
+72.0833 78.002 ;...
+72.1661 77.671 ;...
+72.2483 77.337 ;...
+72.33 77 ;...
+72.3928 77.1856 ;...
+72.4554 77.3724 ;...
+72.5178 77.5606 ;...
+72.58 77.75 ;...
+72.33 78.5 ;...
+72.33 79.5 ;...
+72.08 80.58 ;...
+71.67 81.58 ;...
+71.7108 81.8905 ;...
+71.751 82.2024 ;...
+71.7908 82.5155 ;...
+71.83 82.83 ;...
+71.9352 82.6678 ;...
+72.0403 82.5037 ;...
+72.1452 82.3378 ;...
+72.25 82.17 ;...
+72.42 80.83 ;...
+72.92 80.83 ;...
+73.5 80.33 ;...
+73.58 82 ;...
+73.67 83.75 ;...
+73.83 85.25 ;...
+73.92 87 ;...
+74.33 86.5 ;...
+74.67 86.58 ;...
+75 87.5 ;...
+75.33 88.67 ;...
+75.58 90.58 ;...
+75.75 92.17 ;...
+76.08 93.25 ;...
+76.08 94.75 ;...
+76.08 94.75 ;...
+76.1444 95.305 ;...
+76.2076 95.865 ;...
+76.2694 96.43 ;...
+76.33 97 ;...
+76.2475 96.916 ;...
+76.1651 96.8331 ;...
+76.0825 96.7511 ;...
+76 96.67 ;...
+76.0454 97.3513 ;...
+76.0889 98.0368 ;...
+76.1304 98.7264 ;...
+76.17 99.42 ;...
+76.2526 99.3169 ;...
+76.3351 99.2125 ;...
+76.4176 99.1069 ;...
+76.5 99 ;...
+76.5 101 ;...
+77 101 ;...
+77.42 102.5 ;...
+77.75 104.08 ;...
+77.6689 104.59 ;...
+77.5868 105.093 ;...
+77.5039 105.59 ;...
+77.42 106.08 ;...
+77.3362 105.613 ;...
+77.2516 105.153 ;...
+77.1662 104.699 ;...
+77.08 104.25 ;...
+77.0811 104.687 ;...
+77.0815 105.125 ;...
+77.0811 105.563 ;...
+77.08 106 ;...
+77.0608 106.377 ;...
+77.0411 106.752 ;...
+77.0208 107.127 ;...
+77 107.5 ;...
+76.8754 107.222 ;...
+76.7506 106.95 ;...
+76.6254 106.683 ;...
+76.5 106.42 ;...
+76.5007 106.772 ;...
+76.501 107.125 ;...
+76.5007 107.478 ;...
+76.5 107.83 ;...
+76.75 108.17 ;...
+76.75 109.58 ;...
+76.67 111.17 ;...
+76.5 112.42 ;...
+76.17 113.5 ;...
+75.75 113.83 ;...
+75.25 113.67 ;...
+74.92 112.5 ;...
+74.5 111 ;...
+74.17 109.67 ;...
+73.75 108.33 ;...
+73.58 107.17 ;...
+73.17 106.5 ;...
+73.17 107.58 ;...
+73.33 108.83 ;...
+73.5 110.17 ;...
+73.5 110.17 ;...
+73.5627 110.004 ;...
+73.6253 109.837 ;...
+73.6877 109.669 ;...
+73.75 109.5 ;...
+73.8127 109.666 ;...
+73.8753 109.832 ;...
+73.9377 110.001 ;...
+74 110.17 ;...
+73.83 111.17 ;...
+73.67 112 ;...
+73.67 113 ;...
+73.5 113.67 ;...
+73.67 114.83 ;...
+73.75 115.92 ;...
+73.58 117.25 ;...
+73.58 118.67 ;...
+73.17 118.5 ;...
+73 120 ;...
+72.92 122 ;...
+73 123.33 ;...
+73.33 122.83 ;...
+73.67 123 ;...
+73.67 124.83 ;...
+73.42 126.92 ;...
+73.25 128.58 ;...
+72.75 129.83 ;...
+72.25 129.5 ;...
+71.83 128.75 ;...
+71.33 129.42 ;...
+70.92 130.33 ;...
+70.75 131.25 ;...
+70.75 131.25 ;...
+71.08 132 ;...
+71.5 132.25 ;...
+71.92 132.58 ;...
+71.5 133.5 ;...
+71.33 134.42 ;...
+71.58 135 ;...
+71.67 135.83 ;...
+71.5 136.67 ;...
+71.58 138.58 ;...
+71.5612 138.979 ;...
+71.5417 139.377 ;...
+71.5212 139.774 ;...
+71.5 140.17 ;...
+71.6051 140.047 ;...
+71.7102 139.923 ;...
+71.8151 139.797 ;...
+71.92 139.67 ;...
+72.25 139.33 ;...
+72.5 139.75 ;...
+72.5 141.17 ;...
+72.83 140.67 ;...
+72.75 142.42 ;...
+72.67 143.83 ;...
+72.58 145.25 ;...
+72.33 146.83 ;...
+72.33 148.5 ;...
+72.17 149.83 ;...
+71.67 150 ;...
+71.42 150.67 ;...
+71.42 151.58 ;...
+71.17 152.42 ;...
+70.83 152.42 ;...
+70.92 153.5 ;...
+70.92 154.58 ;...
+71.08 156 ;...
+71 157.5 ;...
+71 157.5 ;...
+70.92 158.83 ;...
+70.67 159.67 ;...
+70.5651 159.754 ;...
+70.4601 159.837 ;...
+70.3551 159.919 ;...
+70.25 160 ;...
+70.1451 159.873 ;...
+70.0402 159.747 ;...
+69.9351 159.623 ;...
+69.83 159.5 ;...
+69.7682 159.795 ;...
+69.706 160.088 ;...
+69.6432 160.38 ;...
+69.58 160.67 ;...
+69.58 162.25 ;...
+69.67 164 ;...
+69.58 165.67 ;...
+69.42 167 ;...
+69.67 167.67 ;...
+70 168.25 ;...
+69.83 169.42 ;...
+69.58 169.17 ;...
+69.58 168.25 ;...
+69.25 168.33 ;...
+69.08 169.33 ;...
+68.75 169.58 ;...
+68.83 170.42 ;...
+69.08 171 ;...
+69.5 170.58 ;...
+70.08 170.58 ;...
+70 172 ;...
+69.92 173.33 ;...
+69.83 174.67 ;...
+69.83 176.17 ;...
+69.58 177.42 ;...
+69.42 178.67 ;...
+69.17 179.75 ;...
+69.08 180 ;...
+69.08 180 ;...
+68.75 180.92 ;...
+68.42 181.83 ;...
+68.17 183.08 ;...
+67.83 184.17 ;...
+67.5 185 ;...
+67.08 185.5 ;...
+67.08 186.83 ;...
+66.92 188.33 ;...
+66.5 189.17 ;...
+66.3956 189.423 ;...
+66.2908 189.674 ;...
+66.1856 189.923 ;...
+66.08 190.17 ;...
+66.0178 189.981 ;...
+65.9555 189.793 ;...
+65.8928 189.606 ;...
+65.83 189.42 ;...
+65.5 188.92 ;...
+65.5 187.83 ;...
+65 187.83 ;...
+64.67 187.25 ;...
+64.25 187 ;...
+64.42 186 ;...
+64.75 184.92 ;...
+65 184.17 ;...
+65.42 184 ;...
+65.58 183 ;...
+65.42 182.33 ;...
+65.5 181.5 ;...
+65.92 181.17 ;...
+66.33 181 ;...
+66 180.08 ;...
+65.67 180.75 ;...
+65.17 180.33 ;...
+65.05 180 ;...
+65.05 180 ;...
+64.92 179.58 ;...
+64.67 178.67 ;...
+64.75 177.5 ;...
+64.33 177.5 ;...
+64.33 178.33 ;...
+64 178.58 ;...
+63.5 178.83 ;...
+63 179.25 ;...
+62.67 179.67 ;...
+62.25 179 ;...
+62.5 178.17 ;...
+62.5 177 ;...
+62.25 176.25 ;...
+62.08 175.33 ;...
+61.75 174.58 ;...
+61.67 173.83 ;...
+61.33 173.08 ;...
+61 172.33 ;...
+60.67 171.5 ;...
+60.42 170.67 ;...
+60.42 170.67 ;...
+59.92 170.33 ;...
+60.25 169.83 ;...
+60.5 169.17 ;...
+60.58 168.25 ;...
+60.42 167.5 ;...
+60.17 166.83 ;...
+59.75 166.17 ;...
+60.33 166.17 ;...
+60.17 165.33 ;...
+59.83 164.75 ;...
+60 164.17 ;...
+59.83 163.33 ;...
+59.42 163.17 ;...
+59 162.92 ;...
+58.58 162.33 ;...
+58.17 161.92 ;...
+57.75 162.08 ;...
+57.92 162.58 ;...
+57.67 163.17 ;...
+57.25 162.67 ;...
+56.83 162.67 ;...
+56.67 163.08 ;...
+56.17 163.25 ;...
+56 162.83 ;...
+56.25 162.42 ;...
+56.08 161.92 ;...
+55.67 161.58 ;...
+55.17 161.67 ;...
+54.75 162.08 ;...
+54.5 161.67 ;...
+54.5 161.67 ;...
+54.58 161.17 ;...
+54.42 160.42 ;...
+54.08 159.92 ;...
+53.58 159.83 ;...
+53.17 159.92 ;...
+53.17 159.17 ;...
+52.83 158.58 ;...
+52.25 158.42 ;...
+51.67 157.92 ;...
+51.25 157.33 ;...
+50.92 156.67 ;...
+51.25 156.5 ;...
+51.67 156.5 ;...
+52.17 156.42 ;...
+52.75 156.08 ;...
+53.33 156 ;...
+53.92 155.83 ;...
+54.33 155.67 ;...
+54.83 155.5 ;...
+55.25 155.42 ;...
+55.83 155.5 ;...
+56.33 155.75 ;...
+56.83 156 ;...
+57 156.67 ;...
+57.42 157 ;...
+57.75 156.83 ;...
+57.75 157.5 ;...
+58 158.25 ;...
+58.42 159 ;...
+58.83 159.67 ;...
+58.83 159.67 ;...
+59.25 160.08 ;...
+59.67 160.92 ;...
+60.08 161.58 ;...
+60.42 161.92 ;...
+60.67 162.67 ;...
+60.83 163.5 ;...
+61.17 163.75 ;...
+61.75 163.92 ;...
+62.25 164.08 ;...
+62.5 164.75 ;...
+62.67 164.42 ;...
+62.5 163.25 ;...
+62.08 162.92 ;...
+61.67 162.92 ;...
+61.58 162.25 ;...
+61.25 161.67 ;...
+60.92 161 ;...
+60.58 160.17 ;...
+60.92 159.92 ;...
+61.25 159.83 ;...
+61.3952 159.953 ;...
+61.5402 160.078 ;...
+61.6852 160.203 ;...
+61.83 160.33 ;...
+61.7905 160.122 ;...
+61.7506 159.914 ;...
+61.7105 159.707 ;...
+61.67 159.5 ;...
+61.83 158.83 ;...
+61.75 157.92 ;...
+61.58 156.92 ;...
+61.17 156.33 ;...
+60.75 155.75 ;...
+60.42 155 ;...
+60 154.5 ;...
+59.5 154.17 ;...
+59.5 154.17 ;...
+59.42 154.83 ;...
+59.3576 154.915 ;...
+59.2951 155.001 ;...
+59.2326 155.085 ;...
+59.17 155.17 ;...
+59.1485 154.877 ;...
+59.1263 154.584 ;...
+59.1035 154.292 ;...
+59.08 154 ;...
+59.17 153.33 ;...
+59 152.92 ;...
+58.83 152 ;...
+58.83 151.33 ;...
+59.08 151.08 ;...
+59.33 151.67 ;...
+59.58 151.33 ;...
+59.5 150.83 ;...
+59.58 150.17 ;...
+59.75 149.5 ;...
+59.67 149 ;...
+59.42 148.92 ;...
+59.25 148.83 ;...
+59.33 148.17 ;...
+59.25 147.42 ;...
+59.42 146.5 ;...
+59.17 145.92 ;...
+59.33 145.58 ;...
+59.33 144.67 ;...
+59.33 143.67 ;...
+59.25 142.67 ;...
+59 142 ;...
+58.67 141.67 ;...
+58.42 141 ;...
+58.08 140.58 ;...
+57.75 140.25 ;...
+57.5 139.67 ;...
+57.5 139.67 ;...
+57.17 139 ;...
+56.83 138.5 ;...
+56.33 137.92 ;...
+55.92 137.25 ;...
+55.58 136.58 ;...
+55.17 135.92 ;...
+54.92 135.17 ;...
+54.67 135.25 ;...
+54.5 135.92 ;...
+54.58 136.67 ;...
+54.17 136.67 ;...
+53.75 136.67 ;...
+54.17 137.08 ;...
+54.25 137.67 ;...
+53.83 137.67 ;...
+53.7476 137.584 ;...
+53.6651 137.499 ;...
+53.5826 137.415 ;...
+53.5 137.33 ;...
+53.5432 137.559 ;...
+53.5859 137.789 ;...
+53.6282 138.019 ;...
+53.67 138.25 ;...
+54.17 138.83 ;...
+54.17 139.5 ;...
+54 140.17 ;...
+53.75 140.25 ;...
+53.5 140.75 ;...
+53.25 141.33 ;...
+52.83 141.08 ;...
+52.42 141.08 ;...
+52.17 141.33 ;...
+51.83 141.08 ;...
+51.42 140.75 ;...
+51 140.5 ;...
+50.5 140.42 ;...
+50.5 140.42 ;...
+50 140.5 ;...
+49.42 140.5 ;...
+49 140.33 ;...
+48.5 140.17 ;...
+48 139.5 ;...
+47.5 139 ;...
+47.08 138.5 ;...
+46.5 138.25 ;...
+46.08 137.92 ;...
+45.67 137.5 ;...
+45.25 136.92 ;...
+44.75 136.33 ;...
+44.33 135.67 ;...
+43.92 135.5 ;...
+43.42 135 ;...
+43.17 134.33 ;...
+42.83 133.75 ;...
+42.67 133.08 ;...
+42.92 132.33 ;...
+43.25 132.33 ;...
+43.25 131.75 ;...
+43 131.5 ;...
+42.58 131.17 ;...
+42.58 130.83 ;...
+42.25 130.67 ;...
+42.08 130.08 ;...
+41.58 129.58 ;...
+41.33 129.67 ;...
+40.83 129.67 ;...
+40.67 129.25 ;...
+40.67 129.25 ;...
+40.33 128.75 ;...
+40.08 128.25 ;...
+39.92 127.83 ;...
+39.8977 127.705 ;...
+39.8753 127.58 ;...
+39.8527 127.455 ;...
+39.83 127.33 ;...
+39.7675 127.373 ;...
+39.705 127.415 ;...
+39.6425 127.458 ;...
+39.6269 127.468 ;...
+39.6113 127.479 ;...
+39.5956 127.489 ;...
+39.58 127.5 ;...
+39.5594 127.489 ;...
+39.5388 127.479 ;...
+39.5181 127.468 ;...
+39.4975 127.457 ;...
+39.415 127.415 ;...
+39.3325 127.372 ;...
+39.25 127.33 ;...
+39.1877 127.455 ;...
+39.1253 127.58 ;...
+39.0627 127.705 ;...
+39 127.83 ;...
+38.67 128.25 ;...
+38.25 128.5 ;...
+37.92 128.83 ;...
+37.5 129.17 ;...
+37 129.33 ;...
+36.5 129.42 ;...
+36 129.42 ;...
+35.58 129.33 ;...
+35.17 129.08 ;...
+34.92 128.5 ;...
+34.83 127.75 ;...
+34.67 127.17 ;...
+34.33 126.58 ;...
+34.83 126.33 ;...
+35.33 126.33 ;...
+35.75 126.75 ;...
+36.33 126.5 ;...
+36.92 126.17 ;...
+37 126.75 ;...
+37.42 126.58 ;...
+37.75 126.08 ;...
+37.67 125.33 ;...
+38.08 124.75 ;...
+38.08 124.75 ;...
+38.5 125 ;...
+38.92 125.17 ;...
+39.42 125.42 ;...
+39.58 125.08 ;...
+39.67 124.67 ;...
+39.83 124.08 ;...
+39.75 123.5 ;...
+39.58 123 ;...
+39.42 122.42 ;...
+39.08 122.08 ;...
+38.83 121.58 ;...
+38.75 121.17 ;...
+39.17 121.67 ;...
+39.5 121.33 ;...
+39.75 121.5 ;...
+40 121.92 ;...
+40.42 122.25 ;...
+40.83 121.92 ;...
+40.92 121.25 ;...
+40.58 120.83 ;...
+40.17 120.42 ;...
+40 119.92 ;...
+39.75 119.42 ;...
+39.42 119.25 ;...
+39.17 119 ;...
+39.08 118.42 ;...
+39.25 118 ;...
+38.92 117.67 ;...
+38.58 117.58 ;...
+38.25 117.92 ;...
+38.25 117.92 ;...
+38 118.33 ;...
+37.75 118.83 ;...
+37.33 118.92 ;...
+37.08 119.33 ;...
+37.08 119.75 ;...
+37.33 120 ;...
+37.58 120.33 ;...
+37.75 120.83 ;...
+37.5 121.17 ;...
+37.42 121.58 ;...
+37.42 122.08 ;...
+37.3977 122.205 ;...
+37.3753 122.33 ;...
+37.3527 122.455 ;...
+37.33 122.58 ;...
+37.205 122.517 ;...
+37.0801 122.455 ;...
+36.955 122.392 ;...
+36.83 122.33 ;...
+37 122 ;...
+36.75 121.5 ;...
+36.58 120.83 ;...
+36.17 120.67 ;...
+35.92 120.17 ;...
+35.58 119.67 ;...
+35.25 119.42 ;...
+34.83 119.17 ;...
+34.58 119.75 ;...
+34.33 120.33 ;...
+33.83 120.5 ;...
+33.42 120.67 ;...
+33 120.83 ;...
+32.58 120.92 ;...
+32.42 121.25 ;...
+32.08 121.67 ;...
+31.67 121.83 ;...
+31.67 121.83 ;...
+31.67 121.25 ;...
+31.25 121.83 ;...
+30.92 121.92 ;...
+30.83 121.5 ;...
+30.67 121.17 ;...
+30.42 120.92 ;...
+30.33 120.42 ;...
+30.17 120.75 ;...
+30.25 121.33 ;...
+30.08 121.67 ;...
+30 122.08 ;...
+29.75 122 ;...
+29.17 121.92 ;...
+29.17 121.67 ;...
+28.67 121.5 ;...
+28.42 121.58 ;...
+28.17 121.42 ;...
+28.17 121.08 ;...
+27.83 120.83 ;...
+27.42 120.58 ;...
+27 120.33 ;...
+26.67 120 ;...
+26.75 119.67 ;...
+26.33 119.75 ;...
+26 119.67 ;...
+25.58 119.42 ;...
+25.25 119.17 ;...
+25 118.83 ;...
+24.58 118.67 ;...
+24.58 118.17 ;...
+24.58 118.17 ;...
+24.25 118.17 ;...
+24 117.83 ;...
+23.67 117.42 ;...
+23.42 117 ;...
+23.08 116.5 ;...
+22.83 116 ;...
+22.75 115.5 ;...
+22.67 115 ;...
+22.67 114.5 ;...
+22.25 114.17 ;...
+22.67 113.67 ;...
+22.17 113.5 ;...
+22.5 113.08 ;...
+22 113 ;...
+21.83 112.5 ;...
+21.75 112 ;...
+21.5 111.5 ;...
+21.42 110.92 ;...
+21.25 110.42 ;...
+20.92 110.25 ;...
+20.5 110.5 ;...
+20.33 110.25 ;...
+20.33 109.92 ;...
+20.75 109.75 ;...
+21 109.67 ;...
+21.42 109.75 ;...
+21.42 109.42 ;...
+21.67 109 ;...
+21.67 108.5 ;...
+21.5 108 ;...
+21.5 108 ;...
+21.25 107.58 ;...
+21 107.08 ;...
+20.67 106.67 ;...
+20.25 106.42 ;...
+19.92 105.92 ;...
+19.5 105.75 ;...
+19 105.58 ;...
+18.42 105.92 ;...
+18.08 106.33 ;...
+17.75 106.42 ;...
+17.33 106.83 ;...
+16.92 107.17 ;...
+16.5 107.67 ;...
+16.17 108.08 ;...
+15.75 108.42 ;...
+15.33 108.83 ;...
+14.75 109 ;...
+14.25 109.17 ;...
+13.75 109.25 ;...
+13.25 109.25 ;...
+12.83 109.25 ;...
+12.17 109.17 ;...
+11.75 109.17 ;...
+11.42 109 ;...
+11.17 108.5 ;...
+10.92 108.08 ;...
+10.67 107.67 ;...
+10.42 107.25 ;...
+10.42 106.67 ;...
+9.83 106.67 ;...
+9.83 106.67 ;...
+9.42 106.33 ;...
+9.33 105.83 ;...
+9 105.42 ;...
+8.67 105 ;...
+9.08 104.83 ;...
+9.58 104.83 ;...
+9.70501 104.872 ;...
+9.83001 104.915 ;...
+9.95501 104.957 ;...
+10.08 105 ;...
+10.1226 104.875 ;...
+10.1651 104.75 ;...
+10.2076 104.625 ;...
+10.25 104.5 ;...
+10.58 104.25 ;...
+10.58 103.67 ;...
+10.92 103.25 ;...
+11.33 103 ;...
+11.75 102.83 ;...
+12.08 102.58 ;...
+12.25 102.25 ;...
+12.67 101.83 ;...
+12.67 101.42 ;...
+12.75 100.92 ;...
+13.33 100.92 ;...
+13.5 100.42 ;...
+13.33 100 ;...
+12.75 99.92 ;...
+12.08 99.92 ;...
+11.67 99.75 ;...
+11.17 99.58 ;...
+10.83 99.33 ;...
+10.25 99.17 ;...
+9.67 99.17 ;...
+9.17 99.33 ;...
+9.25 99.83 ;...
+9.25 99.83 ;...
+8.58 99.92 ;...
+8.25 100.25 ;...
+7.75 100.42 ;...
+7.17 100.58 ;...
+6.92 100.92 ;...
+6.83 101.5 ;...
+6.42 101.83 ;...
+6.08 102.25 ;...
+5.75 102.67 ;...
+5.42 103.08 ;...
+4.92 103.42 ;...
+4.42 103.42 ;...
+3.92 103.42 ;...
+3.42 103.42 ;...
+2.92 103.42 ;...
+2.58 103.75 ;...
+2 104.08 ;...
+1.42 104.25 ;...
+1.42 103.5 ;...
+1.67 103.17 ;...
+1.92 102.75 ;...
+2.17 102.25 ;...
+2.5 101.92 ;...
+2.83 101.42 ;...
+3.33 101.33 ;...
+3.75 100.92 ;...
+4.25 100.58 ;...
+4.75 100.67 ;...
+5.08 100.42 ;...
+5.67 100.33 ;...
+5.67 100.33 ;...
+6.08 100.33 ;...
+6.67 100.08 ;...
+7 99.75 ;...
+7.33 99.42 ;...
+7.83 99.08 ;...
+8.25 98.67 ;...
+8.08 98.42 ;...
+8.5 98.25 ;...
+9 98.25 ;...
+9.58 98.5 ;...
+10.17 98.5 ;...
+10.67 98.5 ;...
+11 98.75 ;...
+11.42 98.75 ;...
+11.92 98.5 ;...
+12.5 98.75 ;...
+13.08 98.58 ;...
+13.67 98.25 ;...
+14.25 98.08 ;...
+14.75 97.92 ;...
+15.33 97.83 ;...
+15.83 97.75 ;...
+16.33 97.58 ;...
+16.75 97.25 ;...
+16.92 96.92 ;...
+16.58 96.75 ;...
+16.33 96.25 ;...
+16.08 95.83 ;...
+15.75 95.5 ;...
+15.75 95.08 ;...
+15.75 95.08 ;...
+15.83 94.75 ;...
+16.08 94.25 ;...
+16.58 94.42 ;...
+17.17 94.58 ;...
+17.75 94.5 ;...
+18.33 94.33 ;...
+18.83 94.17 ;...
+18.83 93.83 ;...
+19.25 93.58 ;...
+19.5 93.75 ;...
+19.92 93.5 ;...
+19.92 93.17 ;...
+20.25 92.92 ;...
+20.67 92.5 ;...
+21.08 92.17 ;...
+21.5 92 ;...
+21.92 91.92 ;...
+22.33 91.75 ;...
+22.75 91.42 ;...
+22.42 90.92 ;...
+22.17 90.58 ;...
+21.83 90.25 ;...
+21.75 89.75 ;...
+21.67 89.25 ;...
+21.58 88.75 ;...
+21.5 88.33 ;...
+22.08 88.17 ;...
+21.75 88 ;...
+21.58 87.58 ;...
+21.42 87.08 ;...
+21.42 87.08 ;...
+21.08 86.92 ;...
+20.75 87 ;...
+20.33 86.75 ;...
+19.92 86.42 ;...
+19.75 85.92 ;...
+19.67 85.5 ;...
+19.33 85.08 ;...
+19 84.83 ;...
+18.58 84.5 ;...
+18.25 84.08 ;...
+18.08 83.67 ;...
+17.83 83.33 ;...
+17.42 83 ;...
+17.17 82.58 ;...
+17 82.33 ;...
+16.58 82.25 ;...
+16.33 82 ;...
+16.25 81.5 ;...
+16.17 81.17 ;...
+15.75 81 ;...
+15.83 80.42 ;...
+15.42 80.17 ;...
+15 80.08 ;...
+14.42 80.17 ;...
+13.83 80.25 ;...
+13.25 80.33 ;...
+12.67 80.25 ;...
+12.17 80 ;...
+11.75 79.75 ;...
+11.25 79.75 ;...
+11.25 79.75 ;...
+10.83 79.83 ;...
+10.25 79.83 ;...
+10.25 79.33 ;...
+9.75 79 ;...
+9.25 79 ;...
+9.17 78.58 ;...
+8.92 78.17 ;...
+8.33 78 ;...
+8.08 77.58 ;...
+8.25 77.17 ;...
+8.58 76.83 ;...
+9 76.5 ;...
+9.58 76.33 ;...
+10.17 76.17 ;...
+10.67 76 ;...
+11.17 75.83 ;...
+11.58 75.58 ;...
+12 75.25 ;...
+12.42 75 ;...
+13 74.83 ;...
+13.5 74.67 ;...
+14 74.5 ;...
+14.5 74.42 ;...
+15 74.08 ;...
+15.5 73.75 ;...
+16 73.5 ;...
+16.42 73.33 ;...
+17 73.25 ;...
+17.5 73.17 ;...
+18.08 73 ;...
+18.08 73 ;...
+18.67 72.83 ;...
+19 73 ;...
+19.25 72.83 ;...
+19.75 72.67 ;...
+20.25 72.75 ;...
+20.83 72.83 ;...
+21.25 72.58 ;...
+21.67 72.5 ;...
+22.17 72.58 ;...
+22.25 72.33 ;...
+21.83 72.17 ;...
+21.25 72.08 ;...
+20.92 71.67 ;...
+20.75 71.17 ;...
+20.75 70.58 ;...
+21 70.08 ;...
+21.42 69.67 ;...
+21.75 69.33 ;...
+22.25 69.08 ;...
+22.25 69.67 ;...
+22.42 70.17 ;...
+22.92 70.33 ;...
+22.75 69.75 ;...
+22.75 69.17 ;...
+23.08 68.67 ;...
+23.5 68.33 ;...
+23.75 67.92 ;...
+23.92 67.42 ;...
+24.33 67.33 ;...
+24.75 67.17 ;...
+24.75 67.17 ;...
+25 66.83 ;...
+25.33 66.58 ;...
+25.42 66.08 ;...
+25.33 65.58 ;...
+25.25 65.17 ;...
+25.17 64.67 ;...
+25.25 64.25 ;...
+25.42 63.83 ;...
+25.25 63.5 ;...
+25.25 62.92 ;...
+25.17 62.42 ;...
+25.08 61.83 ;...
+25.17 61.17 ;...
+25.25 60.75 ;...
+25.33 60.17 ;...
+25.33 59.67 ;...
+25.42 59.17 ;...
+25.58 58.58 ;...
+25.67 58 ;...
+25.75 57.33 ;...
+26.25 57.17 ;...
+26.67 57.08 ;...
+27 56.75 ;...
+27 56.25 ;...
+26.83 55.75 ;...
+26.67 55.33 ;...
+26.42 54.75 ;...
+26.67 54.33 ;...
+26.58 53.83 ;...
+26.92 53.5 ;...
+26.92 53.5 ;...
+27 53 ;...
+27.42 52.58 ;...
+27.67 52.17 ;...
+27.83 51.5 ;...
+28.25 51.25 ;...
+28.75 51.08 ;...
+29.25 50.67 ;...
+29.83 50.33 ;...
+30.17 50 ;...
+30 49.58 ;...
+30.25 49.25 ;...
+30.5 48.92 ;...
+30 48.58 ;...
+29.58 48.33 ;...
+29.42 47.75 ;...
+29.25 48.17 ;...
+28.75 48.33 ;...
+28.25 48.58 ;...
+27.83 48.83 ;...
+27.42 49.25 ;...
+27 49.67 ;...
+26.67 50.08 ;...
+26.25 50.25 ;...
+25.83 50.17 ;...
+25.42 50.5 ;...
+25.17 50.67 ;...
+25.58 51 ;...
+26 51.08 ;...
+26.08 51.33 ;...
+25.75 51.58 ;...
+25.75 51.58 ;...
+25.25 51.67 ;...
+24.75 51.58 ;...
+24.25 51.25 ;...
+24.25 51.67 ;...
+24 51.83 ;...
+23.92 52.42 ;...
+24.17 52.75 ;...
+24.17 53.25 ;...
+24.08 53.83 ;...
+24.17 54.25 ;...
+24.58 54.5 ;...
+24.92 55 ;...
+25.25 55.33 ;...
+25.58 55.67 ;...
+25.83 56.08 ;...
+26.17 56.25 ;...
+26.08 56.5 ;...
+25.83 56.42 ;...
+25.33 56.42 ;...
+24.75 56.42 ;...
+24.33 56.83 ;...
+24 57.25 ;...
+23.75 57.75 ;...
+23.67 58.25 ;...
+23.5 58.83 ;...
+23.08 59.08 ;...
+22.67 59.42 ;...
+22.42 59.83 ;...
+21.92 59.58 ;...
+21.42 59.33 ;...
+21.42 59.33 ;...
+21.17 59 ;...
+20.83 58.75 ;...
+20.42 58.5 ;...
+20.42 58.08 ;...
+20 57.83 ;...
+19.5 57.75 ;...
+19.08 57.92 ;...
+18.92 57.5 ;...
+18.83 57.08 ;...
+18.58 56.75 ;...
+18.08 56.58 ;...
+17.92 56.33 ;...
+17.83 55.92 ;...
+17.67 55.42 ;...
+17.17 55.25 ;...
+16.92 54.83 ;...
+17 54.42 ;...
+16.92 53.92 ;...
+16.67 53.42 ;...
+16.5 52.92 ;...
+16.25 52.42 ;...
+15.92 52.25 ;...
+15.58 52.25 ;...
+15.42 51.75 ;...
+15.25 51.25 ;...
+15.08 50.75 ;...
+14.92 50.25 ;...
+14.75 49.67 ;...
+14.5 49.17 ;...
+14.08 48.83 ;...
+14.08 48.83 ;...
+13.92 48.33 ;...
+13.92 47.83 ;...
+13.58 47.42 ;...
+13.33 47.08 ;...
+13.25 46.58 ;...
+13.33 46.08 ;...
+13.33 45.67 ;...
+13 45.42 ;...
+12.83 44.92 ;...
+12.75 44.42 ;...
+12.67 43.92 ;...
+12.75 43.5 ;...
+13.25 43.17 ;...
+13.75 43.25 ;...
+14 43.08 ;...
+14.58 42.92 ;...
+15.17 42.75 ;...
+15.67 42.67 ;...
+16.08 42.75 ;...
+16.5 42.67 ;...
+17 42.42 ;...
+17.5 42.08 ;...
+17.75 41.75 ;...
+18.17 41.5 ;...
+18.67 41.25 ;...
+19.17 41 ;...
+19.67 40.75 ;...
+20.08 40.33 ;...
+20.25 39.67 ;...
+20.58 39.5 ;...
+20.58 39.5 ;...
+20.92 39.17 ;...
+21.5 39.08 ;...
+21.92 39 ;...
+22.25 39.08 ;...
+22.75 39 ;...
+23.25 38.75 ;...
+23.75 38.42 ;...
+24.08 38 ;...
+24.33 37.5 ;...
+24.83 37.25 ;...
+25.33 37.08 ;...
+25.83 36.75 ;...
+26.17 36.42 ;...
+26.67 36.08 ;...
+27.17 35.75 ;...
+27.58 35.5 ;...
+28 35.17 ;...
+28 34.67 ;...
+28.5 34.83 ;...
+29 35 ;...
+29.5 35 ;...
+28.92 34.67 ;...
+28.33 34.42 ;...
+27.92 34.42 ;...
+27.75 34.17 ;...
+28.08 33.75 ;...
+28.42 33.42 ;...
+28.75 33.25 ;...
+29.08 33 ;...
+29.58 32.75 ;...
+29.58 32.75 ;...
+30 32.58 ;...
+NaN NaN ;...
+74.08 112.83 ;...
+74.33 111.58 ;...
+74.5 112.08 ;...
+74.42 113.33 ;...
+74.08 112.83 ;...
+NaN NaN ;...
+40.33 26.58 ;...
+40.33 27.17 ;...
+40.33 27.75 ;...
+40.33 27.75 ;...
+40.33 28.42 ;...
+40.33 29 ;...
+40.58 28.92 ;...
+40.67 29.42 ;...
+41 28.83 ;...
+41 28.17 ;...
+40.92 27.42 ;...
+40.67 27.08 ;...
+40.33 26.58 ;...
+NaN NaN ;...
+14.08 13.08 ;...
+13.75 13.33 ;...
+13.33 13.5 ;...
+13.08 13.83 ;...
+12.75 13.92 ;...
+12.5 14.17 ;...
+12.75 14.58 ;...
+12.92 15.08 ;...
+13.42 15.25 ;...
+13.42 14.75 ;...
+13.5 14.17 ;...
+13.83 14.17 ;...
+14.17 13.92 ;...
+14.25 13.42 ;...
+14.08 13.08 ;...
+NaN NaN ;...
+-2.08 31.67 ;...
+-2.75 31.83 ;...
+-2.33 32.25 ;...
+-2.5 32.75 ;...
+-2.5 33.42 ;...
+-2.17 33.83 ;...
+-2.08 33.25 ;...
+-1.75 33.58 ;...
+-1.33 33.92 ;...
+-0.75 34.08 ;...
+-0.42 34.25 ;...
+0.08 34 ;...
+0.25 33.5 ;...
+0.25 33 ;...
+0.17 32.42 ;...
+-0.17 31.83 ;...
+-0.75 31.67 ;...
+-1.17 31.83 ;...
+-1.58 31.75 ;...
+-2.08 31.67 ;...
+NaN NaN ;...
+-6 29.25 ;...
+-6.5 29.5 ;...
+-7 29.83 ;...
+-7.42 30.33 ;...
+-8.08 30.58 ;...
+-8.58 30.5 ;...
+-8.75 31.08 ;...
+-8.25 31 ;...
+-7.75 30.75 ;...
+-7.17 30.5 ;...
+-6.83 30.58 ;...
+-6.5 30.17 ;...
+-6.41753 30.0649 ;...
+-6.33504 29.9599 ;...
+-6.25253 29.8549 ;...
+-6.17 29.75 ;...
+-6.06501 29.8125 ;...
+-5.96001 29.875 ;...
+-5.85501 29.9375 ;...
+-5.75 30 ;...
+-5.605 29.9575 ;...
+-5.46001 29.915 ;...
+-5.315 29.8725 ;...
+-5.17 29.83 ;...
+-4.58 29.67 ;...
+-4 29.42 ;...
+-3.33 29.33 ;...
+-4 29.08 ;...
+-4.5 29.25 ;...
+-5 29.08 ;...
+-5.58 29.33 ;...
+-6 29.25 ;...
+NaN NaN ;...
+-12.17 34.08 ;...
+-12.58 34.17 ;...
+-13 34.33 ;...
+-13.42 34.33 ;...
+-13.75 34.67 ;...
+-14.17 34.75 ;...
+-14.08 35.08 ;...
+-14.5 35.33 ;...
+-13.83 35.25 ;...
+-13.42 34.92 ;...
+-12.83 34.83 ;...
+-12.25 34.83 ;...
+-12.105 34.8726 ;...
+-11.96 34.9151 ;...
+-11.815 34.9576 ;...
+-11.67 35 ;...
+-11.5225 34.9174 ;...
+-11.375 34.8348 ;...
+-11.2275 34.7524 ;...
+-11.08 34.67 ;...
+-10.58 34.58 ;...
+-9.92 34.5 ;...
+-9.58 34.08 ;...
+-9.83 34 ;...
+-10.83 34.25 ;...
+-11 34.25 ;...
+-11.75 34.42 ;...
+-12.17 34.08 ;...
+NaN NaN ;...
+42.5 27.5 ;...
+42.08 27.92 ;...
+41.67 28.08 ;...
+41.33 28.58 ;...
+41.17 29.17 ;...
+41.08 29.83 ;...
+41.17 30.5 ;...
+41.08 31.08 ;...
+41.33 31.58 ;...
+41.58 32.08 ;...
+41.83 32.58 ;...
+42 33.25 ;...
+41.92 33.83 ;...
+41.92 34.5 ;...
+42 35 ;...
+41.67 35.33 ;...
+41.67 36 ;...
+41.25 36.33 ;...
+41.25 36.75 ;...
+41 37.25 ;...
+41.08 37.67 ;...
+40.92 38.17 ;...
+41 38.75 ;...
+41.08 39.33 ;...
+41 39.92 ;...
+41.08 40.5 ;...
+41.42 41.08 ;...
+41.67 41.58 ;...
+42 41.67 ;...
+42.33 41.42 ;...
+42.33 41.42 ;...
+42.75 41.25 ;...
+42.92 41 ;...
+43.17 40.33 ;...
+43.42 39.83 ;...
+43.67 39.5 ;...
+44.08 39 ;...
+44.33 38.5 ;...
+44.42 38 ;...
+44.58 37.67 ;...
+44.75 37.25 ;...
+45.08 37 ;...
+45.08 36.67 ;...
+45 36.08 ;...
+45.08 35.58 ;...
+44.83 35.08 ;...
+44.75 34.58 ;...
+44.5 34.25 ;...
+44.33 33.75 ;...
+44.58 33.42 ;...
+45 33.58 ;...
+45.17 33.25 ;...
+45.33 32.67 ;...
+45.75 33.08 ;...
+45.92 33.75 ;...
+46.17 33.17 ;...
+46 32.58 ;...
+46.25 31.83 ;...
+46.58 32.08 ;...
+46.58 31.42 ;...
+46.58 30.83 ;...
+46.58 30.83 ;...
+46.17 30.58 ;...
+45.83 30.17 ;...
+45.67 29.58 ;...
+45.17 29.67 ;...
+44.83 29.5 ;...
+44.67 29 ;...
+44.33 28.67 ;...
+43.92 28.67 ;...
+43.42 28.58 ;...
+43.33 28 ;...
+42.83 27.92 ;...
+42.5 27.5 ;...
+NaN NaN ;...
+45.92 34.42 ;...
+45.75 34.75 ;...
+45.42 35 ;...
+45.25 35.5 ;...
+45.42 36.08 ;...
+45.42 36.67 ;...
+45.33 37.33 ;...
+45.83 37.75 ;...
+46.25 38.25 ;...
+46.42 37.92 ;...
+46.67 37.67 ;...
+46.67 38.17 ;...
+47 38.75 ;...
+47.08 39.33 ;...
+47.25 39 ;...
+47.17 38.42 ;...
+47.08 37.58 ;...
+46.75 36.83 ;...
+46.67 36.08 ;...
+46.42 35.42 ;...
+46.08 35 ;...
+45.92 34.42 ;...
+NaN NaN ;...
+44.75 46.67 ;...
+44.42 46.67 ;...
+44.25 47.08 ;...
+43.75 47.42 ;...
+43.42 47.42 ;...
+43 47.42 ;...
+42.5 47.83 ;...
+42.08 48.25 ;...
+41.83 48.67 ;...
+41.42 49 ;...
+41 49.17 ;...
+40.58 49.67 ;...
+40.5403 49.8153 ;...
+40.5004 49.9603 ;...
+40.4603 50.1053 ;...
+40.42 50.25 ;...
+40.3977 50.1249 ;...
+40.3753 49.9998 ;...
+40.3527 49.8749 ;...
+40.33 49.75 ;...
+40 49.42 ;...
+39.5 49.25 ;...
+39.17 49.17 ;...
+39.17 48.75 ;...
+38.58 48.83 ;...
+38.08 48.92 ;...
+37.58 49 ;...
+37.42 49.5 ;...
+37.33 50.17 ;...
+37.08 50.33 ;...
+36.75 51 ;...
+36.58 51.58 ;...
+36.75 52.42 ;...
+36.92 53.17 ;...
+36.83 53.92 ;...
+37.17 54 ;...
+37.17 54 ;...
+37.83 53.75 ;...
+38.5 53.75 ;...
+39.08 54 ;...
+39.17 53.58 ;...
+39.58 53.42 ;...
+40 53.5 ;...
+40 52.75 ;...
+40.33 52.67 ;...
+40.83 52.92 ;...
+40.67 53.5 ;...
+40.67 54.17 ;...
+41 54.75 ;...
+41.33 54.33 ;...
+41.67 53.92 ;...
+42.08 53.67 ;...
+42 53 ;...
+41.67 52.67 ;...
+41.25 52.75 ;...
+41.75 52.42 ;...
+41.855 52.3976 ;...
+41.96 52.3751 ;...
+42.065 52.3526 ;...
+42.17 52.33 ;...
+42.2726 52.4146 ;...
+42.3751 52.4994 ;...
+42.4776 52.5846 ;...
+42.58 52.67 ;...
+42.6425 52.6276 ;...
+42.705 52.5852 ;...
+42.7675 52.5426 ;...
+42.83 52.5 ;...
+42.83 51.83 ;...
+43.17 51.33 ;...
+43.67 51.17 ;...
+44.17 50.75 ;...
+44.33 50.17 ;...
+44.58 50.25 ;...
+44.58 50.92 ;...
+44.5 51.42 ;...
+44.5 51.42 ;...
+45 51.17 ;...
+45.0828 51.3144 ;...
+45.1654 51.4592 ;...
+45.2478 51.6044 ;...
+45.33 51.75 ;...
+45.353 51.9373 ;...
+45.3756 52.1247 ;...
+45.398 52.3123 ;...
+45.42 52.5 ;...
+45.17 53.08 ;...
+45.25 53.67 ;...
+45.25 54.25 ;...
+45.5 53.92 ;...
+45.92 53.83 ;...
+46.42 53.67 ;...
+46.83 53.17 ;...
+47.17 52.58 ;...
+47.17 52.08 ;...
+47.17 51.42 ;...
+47.25 50.92 ;...
+46.83 50.17 ;...
+46.75 49.5 ;...
+46.58 48.92 ;...
+46.33 49 ;...
+46.25 48.5 ;...
+45.92 48.25 ;...
+46.08 47.58 ;...
+45.67 47.33 ;...
+45.17 47 ;...
+44.75 46.67 ;...
+NaN NaN ;...
+45 58.17 ;...
+44.5 58.17 ;...
+44.25 58.33 ;...
+43.67 58.33 ;...
+43.67 59 ;...
+43.67 59.67 ;...
+43.5 59.83 ;...
+43.67 60.5 ;...
+44.17 61.08 ;...
+44.67 61.08 ;...
+44.75 61.5 ;...
+45 61.83 ;...
+45.33 61.5 ;...
+45.75 61 ;...
+46 61 ;...
+46.42 61.33 ;...
+46.75 61.67 ;...
+46.5 61 ;...
+46.67 60.75 ;...
+46.5 60.08 ;...
+46.17 60.08 ;...
+46.33 59.75 ;...
+45.92 59.5 ;...
+45.92 59 ;...
+45.83 58.67 ;...
+45.42 58.58 ;...
+45 58.17 ;...
+NaN NaN ;...
+45.58 73.42 ;...
+45.25 73.83 ;...
+44.92 74.08 ;...
+45.67 74.25 ;...
+46 74.17 ;...
+46.17 74.75 ;...
+46.42 75.17 ;...
+46.58 76 ;...
+46.42 76.83 ;...
+46.48 77.33 ;...
+46.33 78 ;...
+46.42 78.75 ;...
+46.75 79.25 ;...
+46.75 78.75 ;...
+46.58 78.33 ;...
+46.67 77.83 ;...
+46.58 77.33 ;...
+46.67 76.83 ;...
+46.75 76.08 ;...
+46.67 75.25 ;...
+46.75 74.75 ;...
+46.42 74.17 ;...
+46.17 73.67 ;...
+45.58 73.42 ;...
+NaN NaN ;...
+51.58 103.58 ;...
+51.33 104.5 ;...
+51.5 105.25 ;...
+51.67 105.92 ;...
+52.17 106.25 ;...
+52.5 107 ;...
+52.67 107.83 ;...
+53 108.33 ;...
+53.5 109 ;...
+54 109.42 ;...
+54.5 109.5 ;...
+55.08 109.67 ;...
+55.58 109.92 ;...
+55.75 109.5 ;...
+55.5 109.25 ;...
+55.17 109.17 ;...
+54.75 108.83 ;...
+54.33 108.5 ;...
+53.92 108.17 ;...
+53.5 107.58 ;...
+53.08 107 ;...
+52.75 106.58 ;...
+52.5 106.08 ;...
+52.17 105.67 ;...
+51.83 105.25 ;...
+51.75 104.5 ;...
+51.58 103.58 ;...
+NaN NaN ;...
+61.08 30 ;...
+60.75 30.58 ;...
+60.33 30.92 ;...
+59.92 31.08 ;...
+59.92 31.5 ;...
+60.17 31.75 ;...
+60.17 32.5 ;...
+60.5 32.83 ;...
+60.83 32.83 ;...
+61.17 32.5 ;...
+61.33 31.83 ;...
+61.58 31.42 ;...
+61.58 30.75 ;...
+61.33 30.17 ;...
+61.08 30 ;...
+NaN NaN ;...
+61.75 34.5 ;...
+61.5 34.83 ;...
+61.33 35.5 ;...
+60.92 35.5 ;...
+60.92 36 ;...
+61.08 36.42 ;...
+61.42 36.42 ;...
+61.67 36 ;...
+62 35.75 ;...
+62.42 35.83 ;...
+62.67 35.25 ;...
+62.83 34.5 ;...
+62.58 34.83 ;...
+62.5 35.33 ;...
+62.17 35.33 ;...
+62.17 34.67 ;...
+61.75 34.5 ;...
+NaN NaN ;...
+-21.92 43.25 ;...
+-21.33 43.58 ;...
+-20.92 43.92 ;...
+-20.5 44.08 ;...
+-20 44.42 ;...
+-19.58 44.5 ;...
+-19 44.25 ;...
+-18.58 44.25 ;...
+-18 44 ;...
+-18 44 ;...
+-17.42 43.92 ;...
+-16.92 44.25 ;...
+-16.17 44.5 ;...
+-16.17 44.92 ;...
+-15.92 45.42 ;...
+-15.75 45.92 ;...
+-15.5 46.42 ;...
+-15.25 46.83 ;...
+-14.75 47.25 ;...
+-14.75 47.67 ;...
+-14.33 47.58 ;...
+-14.08 47.92 ;...
+-13.58 47.83 ;...
+-13.5 48.33 ;...
+-13.33 48.75 ;...
+-12.92 48.83 ;...
+-12.42 48.83 ;...
+-12 49.25 ;...
+-12.5 49.58 ;...
+-13 49.83 ;...
+-13.58 49.92 ;...
+-14.17 50.17 ;...
+-14.92 50.33 ;...
+-15.42 50.42 ;...
+-15.92 50.08 ;...
+-15.5 49.67 ;...
+-16.33 49.75 ;...
+-16.75 49.67 ;...
+-17 49.42 ;...
+-17.42 49.42 ;...
+-17.42 49.42 ;...
+-18 49.33 ;...
+-18.5 49.25 ;...
+-19.08 49 ;...
+-19.58 48.83 ;...
+-20.08 48.67 ;...
+-20.5 48.5 ;...
+-20.92 48.42 ;...
+-21.33 48.25 ;...
+-21.75 48.08 ;...
+-22.17 47.92 ;...
+-22.58 47.83 ;...
+-23 47.75 ;...
+-23.42 47.67 ;...
+-23.75 47.5 ;...
+-24.25 47.33 ;...
+-24.67 47.17 ;...
+-25.17 46.83 ;...
+-25.08 46.42 ;...
+-25.25 45.92 ;...
+-25.5 45.5 ;...
+-25.58 45 ;...
+-25.33 44.58 ;...
+-25 44.17 ;...
+-24.5 43.75 ;...
+-23.92 43.67 ;...
+-23.25 43.58 ;...
+-22.83 43.33 ;...
+-22.33 43.25 ;...
+-21.92 43.25 ;...
+NaN NaN ;...
+-21 55.17 ;...
+-20.92 55.58 ;...
+-21.17 55.83 ;...
+-21.33 55.75 ;...
+-21.33 55.33 ;...
+-21 55.17 ;...
+NaN NaN ;...
+-20.5 57.33 ;...
+-20 57.5 ;...
+-20.17 57.75 ;...
+-20.5 57.75 ;...
+-20.5 57.33 ;...
+NaN NaN ;...
+12.42 53.33 ;...
+12.67 53.67 ;...
+12.58 54.17 ;...
+12.5 54.58 ;...
+12.25 54.08 ;...
+12.25 53.67 ;...
+12.42 53.33 ;...
+NaN NaN ;...
+-48.83 68.83 ;...
+-48.67 69 ;...
+-49 69.33 ;...
+-49.25 69.67 ;...
+-49.17 70 ;...
+-49.08 70.58 ;...
+-49.42 70.42 ;...
+-49.42 70 ;...
+-49.67 69.83 ;...
+-49.5 69.5 ;...
+-49.67 68.92 ;...
+-49.25 69.17 ;...
+-48.83 68.83 ;...
+NaN NaN ;...
+11.5628 92.6807 ;...
+11.8212 92.5312 ;...
+12.7377 92.7533 ;...
+12.7509 92.9143 ;...
+12.6122 92.9652 ;...
+11.5628 92.6807 ;...
+NaN NaN ;...
+12.997 92.8677 ;...
+13.1611 92.8272 ;...
+13.4075 92.8832 ;...
+13.49 92.9795 ;...
+13.4714 93.0646 ;...
+13.1872 93.0769 ;...
+12.997 92.8677 ;...
+NaN NaN ;...
+2.36635 96.4071 ;...
+2.51322 96.0803 ;...
+2.71893 95.8091 ;...
+2.85556 95.848 ;...
+2.88239 95.974 ;...
+2.6184 96.2558 ;...
+2.36635 96.4071 ;...
+NaN NaN ;...
+1.33 97.25 ;...
+1.5 97.5 ;...
+1 98 ;...
+0.58 98 ;...
+1 97.58 ;...
+1.33 97.25 ;...
+NaN NaN ;...
+-1 98.75 ;...
+-0.92 99 ;...
+-1.67 99.42 ;...
+-1.75 99.08 ;...
+-1.5 98.83 ;...
+-1 98.75 ;...
+NaN NaN ;...
+8.08 79.75 ;...
+8.5 79.83 ;...
+9 79.92 ;...
+9.42 80.17 ;...
+9.83 80 ;...
+9.5 80.58 ;...
+9 80.92 ;...
+8.5 81.25 ;...
+8 81.5 ;...
+7.58 81.83 ;...
+7 81.83 ;...
+6.5 81.67 ;...
+6.17 81.25 ;...
+6 80.67 ;...
+6 80.25 ;...
+6.42 80 ;...
+7.08 79.83 ;...
+7.5 79.75 ;...
+8.08 79.75 ;...
+NaN NaN ;...
+19.33 108.67 ;...
+19.58 109.08 ;...
+19.92 109.58 ;...
+20 110 ;...
+20 110.5 ;...
+20 111 ;...
+19.67 111 ;...
+19.33 110.67 ;...
+18.83 110.5 ;...
+18.5 110.17 ;...
+18.25 109.67 ;...
+18.25 109.25 ;...
+18.5 108.75 ;...
+18.92 108.67 ;...
+19.33 108.67 ;...
+NaN NaN ;...
+23.67 120.17 ;...
+24.17 120.5 ;...
+24.58 120.75 ;...
+25 121.08 ;...
+25.25 121.58 ;...
+25.08 121.92 ;...
+24.58 121.92 ;...
+24.08 121.67 ;...
+23.5 121.5 ;...
+23 121.33 ;...
+22.58 121 ;...
+22 120.83 ;...
+22.42 120.67 ;...
+22.58 120.33 ;...
+23.17 120.17 ;...
+23.67 120.17 ;...
+NaN NaN ;...
+33.25 129.5 ;...
+33.5 130 ;...
+33.83 130.42 ;...
+33.92 130.92 ;...
+33.67 131.08 ;...
+33.67 131.58 ;...
+33.25 131.67 ;...
+32.83 131.92 ;...
+32.42 131.67 ;...
+31.92 131.42 ;...
+31.42 131.33 ;...
+31.08 130.75 ;...
+31.25 130.17 ;...
+31.67 130.17 ;...
+32.08 130.17 ;...
+32.33 130.5 ;...
+32.75 130.5 ;...
+33.08 130.25 ;...
+32.75 130.17 ;...
+32.75 129.75 ;...
+33.25 129.5 ;...
+NaN NaN ;...
+33.42 132.25 ;...
+33.67 132.67 ;...
+34 132.83 ;...
+33.92 133.33 ;...
+34.25 133.58 ;...
+34.33 134.08 ;...
+34.17 134.58 ;...
+33.83 134.67 ;...
+33.58 134.33 ;...
+33.25 134.08 ;...
+33.5 133.75 ;...
+33.33 133.25 ;...
+33.08 133.08 ;...
+32.75 132.92 ;...
+32.92 132.42 ;...
+33.42 132.25 ;...
+NaN NaN ;...
+34.33 130.92 ;...
+34.5 131.42 ;...
+34.75 131.83 ;...
+35.08 132.33 ;...
+35.5 132.75 ;...
+35.5 133.25 ;...
+35.58 134 ;...
+35.67 134.67 ;...
+35.75 135.25 ;...
+35.5 135.67 ;...
+35.67 136 ;...
+36.08 136 ;...
+36.42 136.33 ;...
+36.83 136.67 ;...
+37.25 136.75 ;...
+37.5 137.17 ;...
+37.08 137 ;...
+36.75 137 ;...
+37 137.58 ;...
+37.17 138.17 ;...
+37.17 138.17 ;...
+37.42 138.58 ;...
+37.83 138.83 ;...
+38.08 139.33 ;...
+38.58 139.5 ;...
+39 139.83 ;...
+39.42 140 ;...
+39.92 140 ;...
+40.33 140 ;...
+40.67 140 ;...
+41.17 140.33 ;...
+40.92 140.67 ;...
+40.92 141.08 ;...
+41.17 141.08 ;...
+41.17 140.75 ;...
+41.5 140.83 ;...
+41.33 141.42 ;...
+41 141.33 ;...
+40.58 141.42 ;...
+40.17 141.75 ;...
+39.67 141.92 ;...
+39.08 141.83 ;...
+38.75 141.5 ;...
+38.33 141.5 ;...
+38.33 141.08 ;...
+38.08 140.92 ;...
+37.58 141 ;...
+37 140.92 ;...
+36.75 140.67 ;...
+36.17 140.58 ;...
+35.75 140.75 ;...
+35.75 140.75 ;...
+35.5 140.42 ;...
+35.17 140.33 ;...
+34.92 139.75 ;...
+35.5 139.83 ;...
+35.25 139.58 ;...
+35.25 139.08 ;...
+34.58 138.83 ;...
+35.08 138.67 ;...
+34.58 138.08 ;...
+34.58 137.25 ;...
+35 136.75 ;...
+34.67 136.5 ;...
+34.33 136.75 ;...
+33.92 136.17 ;...
+33.42 135.75 ;...
+33.5 135.42 ;...
+33.92 135.17 ;...
+34.33 135.17 ;...
+34.75 135.5 ;...
+34.75 134.92 ;...
+34.83 134.5 ;...
+34.58 133.92 ;...
+34.5 133.5 ;...
+34.33 132.92 ;...
+34.25 132.33 ;...
+33.92 132 ;...
+34 131.5 ;...
+33.92 130.92 ;...
+34.33 130.92 ;...
+NaN NaN ;...
+42.58 139.83 ;...
+42.92 140.42 ;...
+43.25 140.33 ;...
+43.17 140.75 ;...
+43.17 141.25 ;...
+43.42 141.33 ;...
+43.75 141.25 ;...
+44 141.58 ;...
+44.33 141.67 ;...
+44.75 141.75 ;...
+45.25 141.5 ;...
+45.5 141.92 ;...
+45.17 142.33 ;...
+44.75 142.67 ;...
+44.75 142.67 ;...
+44.5 143 ;...
+44.17 143.58 ;...
+44 144.17 ;...
+43.92 144.67 ;...
+44.25 145.33 ;...
+43.75 145 ;...
+43.58 145.25 ;...
+43.33 145.25 ;...
+43.42 145.83 ;...
+43.17 145.33 ;...
+42.92 144.75 ;...
+43 144.17 ;...
+42.67 143.67 ;...
+42.33 143.33 ;...
+42 143.25 ;...
+42.25 142.5 ;...
+42.58 141.83 ;...
+42.5 141.33 ;...
+42.33 140.92 ;...
+42.5 140.67 ;...
+42.5 140.33 ;...
+42.25 140.25 ;...
+42.08 140.67 ;...
+41.83 141.08 ;...
+41.67 140.5 ;...
+41.42 140 ;...
+42 140 ;...
+42.25 139.75 ;...
+42.58 139.83 ;...
+NaN NaN ;...
+26.0796 127.668 ;...
+26.3183 127.656 ;...
+26.5633 127.817 ;...
+26.8715 128.303 ;...
+26.8273 128.34 ;...
+26.7382 128.348 ;...
+26.419 127.962 ;...
+26.1225 127.793 ;...
+26.0796 127.668 ;...
+NaN NaN ;...
+28.0781 129.296 ;...
+28.1158 129.243 ;...
+28.2665 129.232 ;...
+28.3733 129.293 ;...
+28.4989 129.525 ;...
+28.4427 129.726 ;...
+28.0781 129.296 ;...
+NaN NaN ;...
+43.9565 145.442 ;...
+44.5231 146.07 ;...
+44.4503 146.395 ;...
+43.9565 145.442 ;...
+NaN NaN ;...
+44.42 146.92 ;...
+45 147.42 ;...
+45.33 147.92 ;...
+45.25 148.33 ;...
+45.5 148.83 ;...
+45.08 148.17 ;...
+44.75 147.5 ;...
+44.42 146.92 ;...
+NaN NaN ;...
+45.67 149.5 ;...
+46.08 150 ;...
+46.25 150.5 ;...
+45.75 149.83 ;...
+45.67 149.5 ;...
+NaN NaN ;...
+50.27 155.17 ;...
+50.42 155.67 ;...
+50.75 156.08 ;...
+50.5 156.08 ;...
+50.2 155.83 ;...
+50 155.17 ;...
+50.27 155.17 ;...
+NaN NaN ;...
+54.83 137.25 ;...
+55.17 137.5 ;...
+55 138.08 ;...
+54.58 137.58 ;...
+54.83 137.25 ;...
+NaN NaN ;...
+58.5 163.33 ;...
+59 163.67 ;...
+59.17 164.25 ;...
+58.92 164.42 ;...
+58.5 163.33 ;...
+NaN NaN ;...
+53.33 141.75 ;...
+53.58 142.25 ;...
+53.6425 142.312 ;...
+53.7051 142.375 ;...
+53.7675 142.437 ;...
+53.83 142.5 ;...
+53.935 142.438 ;...
+54.0401 142.376 ;...
+54.145 142.313 ;...
+54.25 142.25 ;...
+54.2701 142.355 ;...
+54.2902 142.46 ;...
+54.3101 142.565 ;...
+54.33 142.67 ;...
+54 142.92 ;...
+53.5 143 ;...
+53 143.17 ;...
+52.5 143.17 ;...
+52 143.08 ;...
+51.5 143.33 ;...
+51 143.5 ;...
+50.5 143.67 ;...
+50 143.92 ;...
+49.5 144.17 ;...
+49.08 144.33 ;...
+49.33 143.75 ;...
+49.33 143.25 ;...
+49.33 143.25 ;...
+49.08 142.92 ;...
+48.75 142.83 ;...
+48.33 142.58 ;...
+47.83 142.5 ;...
+47.5 142.67 ;...
+47.25 143 ;...
+46.92 143.08 ;...
+46.75 143.42 ;...
+46.25 143.42 ;...
+46.58 143 ;...
+46.67 142.58 ;...
+46.42 142.25 ;...
+45.92 142 ;...
+46.58 141.75 ;...
+47 142 ;...
+47.58 142 ;...
+48 142.17 ;...
+48.5 142 ;...
+48.75 141.83 ;...
+49.08 142 ;...
+49.5 142.08 ;...
+50 142.08 ;...
+50.5 142 ;...
+51 142.08 ;...
+51.42 142 ;...
+51.75 141.67 ;...
+52.25 141.67 ;...
+52.75 141.83 ;...
+53.33 141.75 ;...
+NaN NaN ;...
+71 180 ;...
+70.83 178.83 ;...
+71.12 178.67 ;...
+71.53 180 ;...
+71.53 181.75 ;...
+71.4403 181.96 ;...
+71.3505 182.169 ;...
+71.2603 182.375 ;...
+71.17 182.58 ;...
+71.104 182.138 ;...
+71.0371 181.699 ;...
+70.969 181.263 ;...
+70.9 180.83 ;...
+71 180 ;...
+NaN NaN ;...
+73.5 140.58 ;...
+73.83 141 ;...
+73.92 142.25 ;...
+73.5 143.5 ;...
+73.17 143.5 ;...
+73.25 141.58 ;...
+73.5 140.58 ;...
+NaN NaN ;...
+74.25 140.17 ;...
+74.25 141 ;...
+74 141 ;...
+73.92 140.33 ;...
+74.25 140.17 ;...
+NaN NaN ;...
+75.58 146.5 ;...
+75.42 147.33 ;...
+75.33 149.25 ;...
+75.17 151 ;...
+74.75 150.5 ;...
+74.75 149.25 ;...
+75.08 147.25 ;...
+75.58 146.5 ;...
+NaN NaN ;...
+75.25 137.17 ;...
+75.92 137.5 ;...
+76.17 139 ;...
+75.75 141 ;...
+76.17 142 ;...
+75.83 143.25 ;...
+75.83 144 ;...
+75.67 145 ;...
+75.42 145 ;...
+75.08 144 ;...
+74.83 142.83 ;...
+75 142 ;...
+74.83 140.5 ;...
+74.67 139.25 ;...
+74.75 138.08 ;...
+75.25 137.17 ;...
+NaN NaN ;...
+77.92 99.33 ;...
+78.33 99.83 ;...
+78.83 100.58 ;...
+79.25 101.25 ;...
+79.42 102.33 ;...
+79.17 103.92 ;...
+78.83 105 ;...
+78.33 105 ;...
+78.17 103.17 ;...
+78.17 101 ;...
+77.92 99.33 ;...
+NaN NaN ;...
+80.08 91.17 ;...
+80.5 92.67 ;...
+80.92 93.17 ;...
+81.25 95.58 ;...
+81 96.42 ;...
+80.9378 96.6955 ;...
+80.8754 96.9673 ;...
+80.8128 97.2355 ;...
+80.75 97.5 ;...
+80.6876 97.3725 ;...
+80.6251 97.2467 ;...
+80.5626 97.1225 ;...
+80.5 97 ;...
+80.4176 97.1487 ;...
+80.3351 97.2949 ;...
+80.2526 97.4386 ;...
+80.17 97.58 ;...
+80 99.25 ;...
+79.9177 99.4856 ;...
+79.8353 99.7174 ;...
+79.7527 99.9455 ;...
+79.67 100.17 ;...
+79.5651 99.9975 ;...
+79.4602 99.8284 ;...
+79.3551 99.6626 ;...
+79.25 99.5 ;...
+78.83 99.83 ;...
+78.75 98.33 ;...
+78.92 96.75 ;...
+79.08 94.67 ;...
+79.5 93.83 ;...
+79.67 91.83 ;...
+80.08 91.17 ;...
+NaN NaN ;...
+71.58 51.67 ;...
+71.705 51.67 ;...
+71.83 51.67 ;...
+71.955 51.67 ;...
+72.08 51.67 ;...
+72.1431 51.9373 ;...
+72.2057 52.2063 ;...
+72.2681 52.4772 ;...
+72.33 52.75 ;...
+72.415 52.6884 ;...
+72.5 52.6262 ;...
+72.585 52.5634 ;...
+72.67 52.5 ;...
+72.7013 52.5456 ;...
+72.7326 52.5914 ;...
+72.764 52.6374 ;...
+72.7953 52.6835 ;...
+72.8265 52.7298 ;...
+72.8578 52.7763 ;...
+72.8891 52.8229 ;...
+72.9203 52.8697 ;...
+73.0453 53.0585 ;...
+73.17 53.25 ;...
+73.233 53.5171 ;...
+73.2957 53.7861 ;...
+73.358 54.0571 ;...
+73.42 54.33 ;...
+73.5026 54.2068 ;...
+73.5851 54.0824 ;...
+73.6676 53.9568 ;...
+73.75 53.83 ;...
+74.17 54.92 ;...
+74.75 55.75 ;...
+75.08 55.75 ;...
+75.33 57.5 ;...
+75.33 57.5 ;...
+75.58 58.17 ;...
+75.92 59.25 ;...
+76.25 61.17 ;...
+76.25 63.17 ;...
+76.42 65.17 ;...
+76.75 66 ;...
+77 67.75 ;...
+76.83 69.08 ;...
+76.33 68.83 ;...
+76.08 67.25 ;...
+75.83 65.5 ;...
+75.67 63.83 ;...
+75.33 62.25 ;...
+75 60.75 ;...
+74.58 59.83 ;...
+74.17 58.67 ;...
+73.75 57.83 ;...
+73.33 57 ;...
+72.92 56.17 ;...
+72.5 55.58 ;...
+72 55.5 ;...
+71.5 55.75 ;...
+71.08 56.33 ;...
+70.9984 56.6692 ;...
+70.9162 57.0056 ;...
+70.8334 57.3392 ;...
+70.75 57.67 ;...
+70.7078 57.4813 ;...
+70.6654 57.2934 ;...
+70.6228 57.1063 ;...
+70.58 56.92 ;...
+70.67 55.33 ;...
+70.83 53.83 ;...
+70.9155 53.5832 ;...
+71.0007 53.3343 ;...
+71.0855 53.0832 ;...
+71.17 52.83 ;...
+71.2732 52.5446 ;...
+71.3759 52.2562 ;...
+71.4782 51.9646 ;...
+71.58 51.67 ;...
+NaN NaN ;...
+68.75 48.75 ;...
+69.25 48.5 ;...
+69.58 49.17 ;...
+69.25 50.33 ;...
+68.92 50 ;...
+68.75 48.75 ;...
+NaN NaN ;...
+80.6667 63.2489 ;...
+80.6746 62.948 ;...
+80.8487 63.145 ;...
+80.938 63.3672 ;...
+81.059 64.3083 ;...
+81.2687 64.7424 ;...
+81.1606 65.142 ;...
+81.0427 65.3646 ;...
+80.894 65.2347 ;...
+80.774 64.8409 ;...
+80.6667 63.2489 ;...
+NaN NaN ;...
+80.3915 59.6481 ;...
+80.7021 59.4564 ;...
+80.8247 59.5796 ;...
+80.8689 59.7898 ;...
+80.8001 60.4064 ;...
+80.8087 61.3671 ;...
+80.8967 61.9936 ;...
+80.67 62.2174 ;...
+80.5571 61.8584 ;...
+80.4004 61.2053 ;...
+80.4883 60.7564 ;...
+80.3915 59.6481 ;...
+NaN NaN ;...
+79.8721 56.6394 ;...
+79.9258 56.5423 ;...
+79.982 56.0694 ;...
+80.1152 56.1001 ;...
+80.2779 56.2996 ;...
+80.2388 57.0281 ;...
+80.1813 57.044 ;...
+79.9344 56.926 ;...
+79.8721 56.6394 ;...
+NaN NaN ;...
+80.4523 57.5083 ;...
+80.4277 58.4501 ;...
+80.3429 59.0025 ;...
+80.199 58.6139 ;...
+80.0375 57.7287 ;...
+80.1043 57.6403 ;...
+80.4523 57.5083 ;...
+NaN NaN ;...
+80.8 54.25 ;...
+81.03 54.83 ;...
+81.03 56.5 ;...
+81.42 56.5 ;...
+81.75 58.5 ;...
+81.33 58.83 ;...
+81 57.67 ;...
+80.8 58.5 ;...
+80.63 56.33 ;...
+80.8 54.25 ;...
+NaN NaN ;...
+80.25 53.17 ;...
+80.5 54 ;...
+80.4453 54.2543 ;...
+80.3904 54.5057 ;...
+80.3353 54.7542 ;...
+80.28 55 ;...
+80.2734 54.5415 ;...
+80.2662 54.0836 ;...
+80.2584 53.6264 ;...
+80.25 53.17 ;...
+NaN NaN ;...
+80.55 44.5 ;...
+80.9 47 ;...
+80.75 48 ;...
+80.55 48 ;...
+80.78 50.33 ;...
+80.67 51.17 ;...
+80.4 50 ;...
+80.12 50 ;...
+80.05 48.42 ;...
+80.17 47 ;...
+80.42 47 ;...
+80.42 45.67 ;...
+80.55 44.5 ;...
+NaN NaN ;...
+79.75 11 ;...
+79.83 13.83 ;...
+79.67 15.5 ;...
+80.03 16.33 ;...
+79.92 18 ;...
+80.25 18 ;...
+80.5 20 ;...
+80.17 21 ;...
+80.42 23 ;...
+80.25 24.83 ;...
+80.25 24.83 ;...
+80.17 27 ;...
+79.87 27 ;...
+79.42 25.67 ;...
+79.17 23.83 ;...
+79.33 20.92 ;...
+79.7 18.75 ;...
+79.17 19 ;...
+78.83 21.5 ;...
+78.42 22.17 ;...
+78.08 23.17 ;...
+77.75 24.83 ;...
+77.25 22.67 ;...
+77.42 20.83 ;...
+77.92 21.67 ;...
+78.18 20.67 ;...
+78.63 20.25 ;...
+78.42 19 ;...
+78.03 18.92 ;...
+77.5 18.08 ;...
+77 17.33 ;...
+76.5 17 ;...
+77.02 15.17 ;...
+77.5 14 ;...
+77.95 13.67 ;...
+78.0182 14.0166 ;...
+78.0859 14.3671 ;...
+78.1532 14.7216 ;...
+78.22 15.08 ;...
+78.2212 14.6025 ;...
+78.2216 14.125 ;...
+78.2212 13.6475 ;...
+78.22 13.17 ;...
+78.7 11.67 ;...
+79.33 11 ;...
+79.75 11 ;...
+NaN NaN ;...
+62.08 -7.42 ;...
+62.3 -7.25 ;...
+62.37 -6.33 ;...
+62 -6.92 ;...
+62.08 -7.42 ;...
+NaN NaN ;...
+59.9485 -1.38344 ;...
+60.0779 -1.44896 ;...
+60.2221 -1.40667 ;...
+60.3865 -1.48212 ;...
+60.5179 -1.28568 ;...
+60.2983 -1.22756 ;...
+60.1687 -1.08101 ;...
+59.9485 -1.38344 ;...
+NaN NaN ;...
+58.8596 -3.0374 ;...
+58.9399 -3.28081 ;...
+59.0397 -3.4012 ;...
+59.1909 -3.33469 ;...
+59.2121 -3.15674 ;...
+58.9822 -2.92567 ;...
+58.8735 -2.96662 ;...
+58.8596 -3.0374 ;...
+NaN NaN ;...
+57.58 -7.25 ;...
+57.83 -7 ;...
+58.17 -7 ;...
+58.5 -6.17 ;...
+58 -6.42 ;...
+57.58 -7.25 ;...
+NaN NaN ;...
+55.47 9.75 ;...
+55.58 10.28 ;...
+55.37 10.75 ;...
+55.05 10.7 ;...
+55.05 10.08 ;...
+55.47 9.75 ;...
+NaN NaN ;...
+55.7 11.08 ;...
+55.9 11.75 ;...
+56.1 12.42 ;...
+56 12.4603 ;...
+55.9 12.5004 ;...
+55.8 12.5403 ;...
+55.7 12.58 ;...
+55.6426 12.477 ;...
+55.5852 12.3744 ;...
+55.5276 12.2721 ;...
+55.47 12.17 ;...
+55.28 12.42 ;...
+55.13 11.83 ;...
+55.2 11.25 ;...
+55.7 11.08 ;...
+NaN NaN ;...
+54.83 11 ;...
+54.92 11.58 ;...
+54.83 12.08 ;...
+54.67 11.83 ;...
+54.58 11.33 ;...
+54.83 11 ;...
+NaN NaN ;...
+57.5 18.08 ;...
+57.78 18.42 ;...
+57.93 19.17 ;...
+57.7 18.75 ;...
+57.28 18.75 ;...
+57.08 18.25 ;...
+57.5 18.08 ;...
+NaN NaN ;...
+58.33 21.83 ;...
+58.62 22.33 ;...
+58.5 23.17 ;...
+58.3 22.67 ;...
+58.1 22.17 ;...
+58.33 21.83 ;...
+NaN NaN ;...
+58.88 22.33 ;...
+59.08 22.5 ;...
+58.83 23 ;...
+58.72 22.42 ;...
+58.88 22.33 ;...
+NaN NaN ;...
+50.08 -5.58 ;...
+50.42 -5 ;...
+50.75 -4.5 ;...
+51.17 -4.08 ;...
+51.17 -3.5 ;...
+51.17 -3 ;...
+51.58 -2.67 ;...
+51.33 -3.33 ;...
+51.58 -3.83 ;...
+51.67 -4.33 ;...
+51.58 -5 ;...
+51.6425 -5.04232 ;...
+51.705 -5.08477 ;...
+51.7675 -5.12732 ;...
+51.83 -5.17 ;...
+51.8928 -5.02312 ;...
+51.9554 -4.87582 ;...
+52.0178 -4.72812 ;...
+52.08 -4.58 ;...
+52.25 -4.08 ;...
+52.75 -4.08 ;...
+52.75 -4.67 ;...
+53.08 -4.33 ;...
+53.33 -4.5 ;...
+53.25 -3.58 ;...
+53.33 -3 ;...
+53.75 -3 ;...
+54.17 -2.67 ;...
+54.17 -3.17 ;...
+54.5 -3.58 ;...
+54.92 -3.25 ;...
+54.75 -4 ;...
+54.58 -4.75 ;...
+54.6651 -4.83198 ;...
+54.7501 -4.91431 ;...
+54.8351 -4.99698 ;...
+54.92 -5.08 ;...
+55.0651 -4.97862 ;...
+55.2102 -4.87649 ;...
+55.3551 -4.77362 ;...
+55.5 -4.67 ;...
+55.5826 -4.77185 ;...
+55.6652 -4.87414 ;...
+55.7476 -4.97685 ;...
+55.83 -5.08 ;...
+55.83 -5.08 ;...
+55.42 -5.58 ;...
+55.75 -5.58 ;...
+55.75 -6.17 ;...
+56.08 -5.5 ;...
+56.5 -5.42 ;...
+56.33 -6 ;...
+56.67 -6 ;...
+57.17 -5.58 ;...
+57.17 -6.25 ;...
+57.42 -6.67 ;...
+57.58 -6.33 ;...
+57.42 -5.83 ;...
+57.75 -5.75 ;...
+58.08 -5.33 ;...
+58.5 -5 ;...
+58.5 -4.08 ;...
+58.58 -3.17 ;...
+58.33 -3 ;...
+58 -3.83 ;...
+57.58 -4.17 ;...
+57.67 -3.42 ;...
+57.67 -2.75 ;...
+57.67 -1.92 ;...
+57.33 -1.83 ;...
+57 -2.08 ;...
+56.58 -2.42 ;...
+56.25 -2.83 ;...
+56 -3.33 ;...
+56 -2.67 ;...
+55.83 -2 ;...
+55.83 -2 ;...
+55.5 -1.67 ;...
+55 -1.5 ;...
+54.58 -1.17 ;...
+54.42 -0.5 ;...
+54 -0.25 ;...
+53.67 -0.08 ;...
+53.42 0.17 ;...
+53.08 0.33 ;...
+52.83 0 ;...
+52.67 0.25 ;...
+52.92 0.58 ;...
+52.92 1 ;...
+52.75 1.58 ;...
+52.42 1.83 ;...
+52 1.58 ;...
+51.75 1 ;...
+51.33 0.58 ;...
+51.25 1.42 ;...
+50.83 0.92 ;...
+50.67 0.08 ;...
+50.67 -0.67 ;...
+50.75 -1.42 ;...
+50.5 -2.08 ;...
+50.67 -2.83 ;...
+50.58 -3.5 ;...
+50.17 -3.67 ;...
+50.33 -4.5 ;...
+50 -5.08 ;...
+50.08 -5.58 ;...
+NaN NaN ;...
+52.17 -10.33 ;...
+52.17 -9.75 ;...
+52.67 -9.5 ;...
+53.17 -9 ;...
+53.17 -9.5 ;...
+53.5 -10 ;...
+53.75 -9.5 ;...
+53.92 -10 ;...
+54.25 -9.75 ;...
+54.25 -9.08 ;...
+54.25 -8.5 ;...
+54.5 -8.17 ;...
+54.67 -8.67 ;...
+55.08 -8.25 ;...
+55.25 -7.5 ;...
+55.17 -6.75 ;...
+55.08 -6.08 ;...
+54.75 -5.75 ;...
+54.25 -5.58 ;...
+53.92 -6.25 ;...
+53.5 -6.08 ;...
+53 -6 ;...
+52.58 -6.17 ;...
+52.17 -6.33 ;...
+52.08 -7.25 ;...
+51.75 -8 ;...
+51.58 -8.75 ;...
+51.5 -9.58 ;...
+51.75 -10.08 ;...
+52.17 -10.33 ;...
+NaN NaN ;...
+42.5 8.67 ;...
+42.67 9.17 ;...
+43 9.42 ;...
+42.5 9.42 ;...
+42.17 9.5 ;...
+41.83 9.33 ;...
+41.33 9.17 ;...
+41.75 8.67 ;...
+42.08 8.58 ;...
+42.5 8.67 ;...
+NaN NaN ;...
+40.92 8.17 ;...
+40.83 8.42 ;...
+40.92 8.75 ;...
+41.25 9.25 ;...
+40.83 9.67 ;...
+40.42 9.75 ;...
+40 9.75 ;...
+39.58 9.67 ;...
+39.17 9.5 ;...
+39.17 9.08 ;...
+38.83 8.75 ;...
+39.17 8.33 ;...
+39.67 8.42 ;...
+40.17 8.42 ;...
+40.58 8.17 ;...
+40.92 8.17 ;...
+NaN NaN ;...
+37.92 12.33 ;...
+38.17 12.75 ;...
+38.17 13.25 ;...
+38 13.67 ;...
+38 14.25 ;...
+38.08 15 ;...
+38.25 15.67 ;...
+37.83 15.33 ;...
+37.33 15.17 ;...
+37 15.33 ;...
+36.67 15.17 ;...
+36.75 14.5 ;...
+37.08 14.17 ;...
+37.17 13.67 ;...
+37.5 13 ;...
+37.5 12.67 ;...
+37.92 12.33 ;...
+NaN NaN ;...
+37.75 21.08 ;...
+38.08 21.33 ;...
+38.25 21.83 ;...
+38.08 22.33 ;...
+37.83 22.83 ;...
+37.33 23.17 ;...
+37.42 22.67 ;...
+37 22.83 ;...
+36.42 23.08 ;...
+36.75 22.67 ;...
+36.42 22.33 ;...
+36.75 22.08 ;...
+36.75 21.67 ;...
+37 21.5 ;...
+37.42 21.58 ;...
+37.75 21.08 ;...
+NaN NaN ;...
+35.17 23.5 ;...
+35.58 23.58 ;...
+35.5 24.17 ;...
+35.33 24.25 ;...
+35.42 24.75 ;...
+35.3 25.17 ;...
+35.33 25.75 ;...
+35.08 25.75 ;...
+35.25 26.25 ;...
+34.97 26.17 ;...
+34.93 25.5 ;...
+34.9 24.83 ;...
+35.08 24.5 ;...
+35.17 24 ;...
+35.17 23.5 ;...
+NaN NaN ;...
+35 32.25 ;...
+35.08 32.75 ;...
+35.25 33 ;...
+35.33 33.42 ;...
+35.42 33.92 ;...
+35.58 34.5 ;...
+35.17 33.92 ;...
+34.92 34.08 ;...
+34.92 33.67 ;...
+34.7 33.5 ;...
+34.58 33 ;...
+34.67 32.42 ;...
+35 32.25 ;...
+NaN NaN ;...
+39.58 2.25 ;...
+39.92 2.92 ;...
+39.67 3.42 ;...
+39.27 3.08 ;...
+39.5 2.73 ;...
+39.58 2.25 ;...
+NaN NaN ;...
+36.17 27.67 ;...
+36.45 28.25 ;...
+36.08 28.08 ;...
+35.88 27.75 ;...
+36.17 27.67 ;...
+NaN NaN ;...
+39.0074 26.4216 ;...
+39.048 26.33 ;...
+39.206 26.2123 ;...
+39.1036 26.03 ;...
+39.1469 25.9003 ;...
+39.2177 25.8589 ;...
+39.2744 25.8896 ;...
+39.3775 26.3033 ;...
+39.3553 26.3831 ;...
+39.1366 26.5097 ;...
+39.0074 26.4216 ;...
+NaN NaN ;...
+38.2571 26.0002 ;...
+38.6004 25.8131 ;...
+38.5678 26.0178 ;...
+38.4165 26.1786 ;...
+38.2841 26.0884 ;...
+38.2571 26.0002 ;...
+NaN NaN ;...
+38.5123 -28.1598 ;...
+38.4167 -28.0294 ;...
+38.3839 -28.193 ;...
+38.4 -28.4735 ;...
+38.4655 -28.5366 ;...
+38.5779 -28.4902 ;...
+38.5123 -28.1598 ;...
+NaN NaN ;...
+37.6973 -25.2548 ;...
+37.7382 -25.7897 ;...
+37.783 -25.8258 ;...
+37.9247 -25.814 ;...
+37.8628 -25.4883 ;...
+37.8959 -25.2349 ;...
+37.788 -25.1679 ;...
+37.6973 -25.2548 ;...
+NaN NaN ;...
+28.33 -16.83 ;...
+28.38 -16.5 ;...
+28.58 -16.17 ;...
+28.17 -16.33 ;...
+28 -16.58 ;...
+28.33 -16.83 ;...
+NaN NaN ;...
+28 -15.8 ;...
+28.17 -15.67 ;...
+28.08 -15.37 ;...
+27.83 -15.33 ;...
+27.75 -15.67 ;...
+28 -15.8 ;...
+NaN NaN ;...
+28.8332 -13.7387 ;...
+28.9412 -13.7871 ;...
+29.0435 -13.7669 ;...
+29.1479 -13.5416 ;...
+29.1372 -13.4283 ;...
+29.0418 -13.4122 ;...
+28.8332 -13.7387 ;...
+NaN NaN ;...
+28.1886 -14.2095 ;...
+28.3599 -14.1823 ;...
+28.6419 -13.9613 ;...
+28.4557 -13.8981 ;...
+28.3388 -13.9338 ;...
+28.1886 -14.2095 ;...
+NaN NaN ;...
+3.33 8.5 ;...
+3.75 8.67 ;...
+3.58 8.92 ;...
+3.25 8.75 ;...
+3.33 8.5 ;...
+NaN NaN ;...
+-34.17 115.17 ;...
+-33.58 115.17 ;...
+-33.5 115.58 ;...
+-33.25 115.83 ;...
+-32.92 115.75 ;...
+-32.33 115.83 ;...
+-31.75 115.83 ;...
+-31.42 115.58 ;...
+-31 115.42 ;...
+-30.5 115.17 ;...
+-30 115 ;...
+-29.5 115 ;...
+-29.08 114.92 ;...
+-28.58 114.58 ;...
+-28.08 114.17 ;...
+-27.67 114.17 ;...
+-27 113.92 ;...
+-26.67 113.5 ;...
+-26.17 113.33 ;...
+-26.58 113.83 ;...
+-25.67 113.5 ;...
+-26.08 113.75 ;...
+-26.08 113.75 ;...
+-26.42 114.17 ;...
+-25.83 114.17 ;...
+-25.42 113.92 ;...
+-25 113.67 ;...
+-24.42 113.5 ;...
+-23.92 113.42 ;...
+-23.5 113.83 ;...
+-23.08 113.83 ;...
+-22.67 113.67 ;...
+-22.25 113.83 ;...
+-21.83 114.08 ;...
+-22.33 114.33 ;...
+-21.92 114.58 ;...
+-21.67 115.08 ;...
+-21.5 115.5 ;...
+-21.08 115.83 ;...
+-20.83 116.25 ;...
+-20.67 116.75 ;...
+-20.67 117.25 ;...
+-20.67 117.75 ;...
+-20.33 118.25 ;...
+-20.33 118.75 ;...
+-20 119.17 ;...
+-20 119.75 ;...
+-19.83 120.33 ;...
+-19.75 120.92 ;...
+-19.42 121.33 ;...
+-19.08 121.67 ;...
+-18.58 121.92 ;...
+-18.17 122.33 ;...
+-18.17 122.33 ;...
+-17.83 122.25 ;...
+-17.42 122.17 ;...
+-17 122.58 ;...
+-16.5 123 ;...
+-17.08 123.25 ;...
+-17.5 123.5 ;...
+-17 123.92 ;...
+-16.67 123.58 ;...
+-16.17 123.75 ;...
+-16.42 124.33 ;...
+-16 124.58 ;...
+-15.5 124.67 ;...
+-15.08 125.17 ;...
+-14.67 125.25 ;...
+-14.67 125.92 ;...
+-14.33 126.08 ;...
+-14.25 126.58 ;...
+-13.92 126.83 ;...
+-14 127.42 ;...
+-14.42 127.83 ;...
+-14.75 128.17 ;...
+-15.25 128.17 ;...
+-14.92 128.67 ;...
+-14.92 129.08 ;...
+-15.25 129.67 ;...
+-14.83 129.83 ;...
+-14.5 129.33 ;...
+-14.08 129.75 ;...
+-13.67 129.92 ;...
+-13.58 130.42 ;...
+-13.58 130.42 ;...
+-13.17 130.25 ;...
+-12.75 130.5 ;...
+-12.33 131.08 ;...
+-12.33 131.75 ;...
+-12.33 132.42 ;...
+-12.205 132.483 ;...
+-12.08 132.545 ;...
+-11.955 132.608 ;...
+-11.83 132.67 ;...
+-11.7275 132.585 ;...
+-11.625 132.5 ;...
+-11.5225 132.415 ;...
+-11.42 132.33 ;...
+-11.4601 132.497 ;...
+-11.5002 132.665 ;...
+-11.5401 132.832 ;...
+-11.58 133 ;...
+-11.92 133.5 ;...
+-12 134.08 ;...
+-12.17 134.67 ;...
+-12.33 135.25 ;...
+-12.17 135.83 ;...
+-12.58 136.08 ;...
+-12.08 136.5 ;...
+-12.42 137 ;...
+-12.83 136.67 ;...
+-13.33 136.58 ;...
+-13.33 136 ;...
+-13.83 136 ;...
+-14.25 136 ;...
+-14.58 135.75 ;...
+-15 135.58 ;...
+-15.33 136 ;...
+-15.5 136.42 ;...
+-16 136.58 ;...
+-16 137.33 ;...
+-16.33 137.75 ;...
+-16.58 138.08 ;...
+-16.83 138.67 ;...
+-16.83 138.67 ;...
+-17 139.17 ;...
+-17.42 139.42 ;...
+-17.83 140 ;...
+-17.75 140.5 ;...
+-17.5 141 ;...
+-17 141 ;...
+-16.58 141.25 ;...
+-16.08 141.5 ;...
+-15.58 141.58 ;...
+-15.08 141.67 ;...
+-14.58 141.58 ;...
+-14.08 141.58 ;...
+-13.58 141.67 ;...
+-13 141.75 ;...
+-12.67 141.75 ;...
+-12.17 141.75 ;...
+-11.83 142.08 ;...
+-11.25 142.08 ;...
+-10.83 142.42 ;...
+-11.08 142.83 ;...
+-11.67 142.92 ;...
+-12.17 143.08 ;...
+-12.67 143.33 ;...
+-13.25 143.58 ;...
+-13.75 143.58 ;...
+-14.33 143.75 ;...
+-14.5 144.08 ;...
+-14.33 144.58 ;...
+-14.67 144.92 ;...
+-15 145.33 ;...
+-15 145.33 ;...
+-15.5 145.33 ;...
+-16 145.5 ;...
+-16.58 145.5 ;...
+-17 145.83 ;...
+-17.33 146.08 ;...
+-17.83 146.17 ;...
+-18.33 146.08 ;...
+-18.67 146.33 ;...
+-19 146.42 ;...
+-19.33 147 ;...
+-19.42 147.5 ;...
+-19.83 147.83 ;...
+-20.17 148.33 ;...
+-20.42 148.83 ;...
+-20.83 148.83 ;...
+-21.08 149.17 ;...
+-21.42 149.33 ;...
+-21.92 149.5 ;...
+-22.42 149.75 ;...
+-22.08 150 ;...
+-22.42 150.42 ;...
+-22.67 150.83 ;...
+-23.17 150.83 ;...
+-23.58 151 ;...
+-23.92 151.33 ;...
+-24.08 151.83 ;...
+-24.58 152.17 ;...
+-25.08 152.58 ;...
+-25.5 152.83 ;...
+-26.08 153.08 ;...
+-26.08 153.08 ;...
+-26.5 153.08 ;...
+-27.08 153.17 ;...
+-27.58 153.33 ;...
+-28.17 153.5 ;...
+-28.67 153.67 ;...
+-29 153.5 ;...
+-29.5 153.42 ;...
+-30 153.33 ;...
+-30.58 153.08 ;...
+-31 153.08 ;...
+-31.5 152.92 ;...
+-31.92 152.75 ;...
+-32.33 152.5 ;...
+-32.75 152.25 ;...
+-33 151.83 ;...
+-33.42 151.5 ;...
+-33.83 151.33 ;...
+-34.33 151.08 ;...
+-34.92 150.83 ;...
+-35.25 150.58 ;...
+-35.75 150.25 ;...
+-36.25 150.17 ;...
+-36.75 150 ;...
+-37.17 150 ;...
+-37.58 149.92 ;...
+-37.83 149.42 ;...
+-37.83 148.75 ;...
+-37.92 148.17 ;...
+-38.08 147.83 ;...
+-38.42 147.25 ;...
+-38.42 147.25 ;...
+-38.67 146.83 ;...
+-39.17 146.42 ;...
+-38.92 146 ;...
+-38.67 145.58 ;...
+-38.25 145.5 ;...
+-38.42 145 ;...
+-37.83 144.83 ;...
+-38.25 144.42 ;...
+-38.5 144 ;...
+-38.83 143.5 ;...
+-38.67 143.08 ;...
+-38.42 142.67 ;...
+-38.33 142.25 ;...
+-38.25 141.75 ;...
+-38.42 141.42 ;...
+-38.08 141.17 ;...
+-38 140.67 ;...
+-37.67 140.25 ;...
+-37.33 139.92 ;...
+-37 139.75 ;...
+-36.75 139.92 ;...
+-36.33 139.67 ;...
+-35.92 139.33 ;...
+-35.58 138.83 ;...
+-35.67 138.17 ;...
+-35.42 138.42 ;...
+-35 138.58 ;...
+-34.67 138.42 ;...
+-34.25 138.08 ;...
+-34.75 137.92 ;...
+-34.75 137.92 ;...
+-35.17 137.75 ;...
+-35.17 137.33 ;...
+-35.25 136.92 ;...
+-35 137 ;...
+-34.83 137.42 ;...
+-34.5 137.5 ;...
+-34 137.58 ;...
+-33.58 137.92 ;...
+-32.83 137.83 ;...
+-33.17 137.5 ;...
+-33.67 137.25 ;...
+-33.92 136.75 ;...
+-34.25 136.33 ;...
+-34.58 135.92 ;...
+-35 135.67 ;...
+-34.58 135.42 ;...
+-34.17 135.25 ;...
+-33.75 135 ;...
+-33.25 134.83 ;...
+-33 134.17 ;...
+-32.58 134.33 ;...
+-32.33 133.92 ;...
+-32.17 133.42 ;...
+-32 132.83 ;...
+-32 132.25 ;...
+-31.75 131.83 ;...
+-31.58 131.33 ;...
+-31.58 130.83 ;...
+-31.58 130.33 ;...
+-31.58 129.67 ;...
+-31.58 129.67 ;...
+-31.67 129.08 ;...
+-31.83 128.58 ;...
+-32.08 128 ;...
+-32.17 127.5 ;...
+-32.25 126.92 ;...
+-32.17 126.25 ;...
+-32.33 125.83 ;...
+-32.67 125.25 ;...
+-32.83 124.75 ;...
+-33 124.08 ;...
+-33.5 124 ;...
+-33.83 123.58 ;...
+-33.83 123.08 ;...
+-33.83 122.58 ;...
+-33.83 121.92 ;...
+-33.83 121.33 ;...
+-33.83 120.75 ;...
+-33.92 120.08 ;...
+-34 119.75 ;...
+-34.42 119.42 ;...
+-34.42 119 ;...
+-34.75 118.58 ;...
+-35 118.33 ;...
+-35 117.75 ;...
+-35 117.17 ;...
+-35 116.67 ;...
+-34.83 116.08 ;...
+-34.33 115.67 ;...
+-34.17 115.17 ;...
+NaN NaN ;...
+-11.83 130.08 ;...
+-11.33 130.33 ;...
+-11.5 130.75 ;...
+-11.33 131.33 ;...
+-11.58 131.5 ;...
+-12 131 ;...
+-11.83 130.67 ;...
+-11.83 130.08 ;...
+NaN NaN ;...
+-35.92 136.58 ;...
+-35.75 136.83 ;...
+-35.67 137.42 ;...
+-35.92 138.08 ;...
+-36.17 137.58 ;...
+-36.17 136.83 ;...
+-35.92 136.58 ;...
+NaN NaN ;...
+-40.67 144.83 ;...
+-40.83 145.5 ;...
+-41.17 146.17 ;...
+-41.17 146.75 ;...
+-41 147.33 ;...
+-40.83 148 ;...
+-41 148.33 ;...
+-41.5 148.33 ;...
+-42 148.33 ;...
+-42.17 148.17 ;...
+-42.58 148 ;...
+-43.08 147.92 ;...
+-42.83 147.5 ;...
+-43.17 147.17 ;...
+-43.58 146.83 ;...
+-43.5 146.25 ;...
+-43.25 145.75 ;...
+-42.92 145.5 ;...
+-42.5 145.33 ;...
+-42.17 145.25 ;...
+-41.75 145 ;...
+-41.17 144.83 ;...
+-40.67 144.83 ;...
+NaN NaN ;...
+-36.9214 175.189 ;...
+-37.0138 175.52 ;...
+-36.9496 175.534 ;...
+-36.7885 175.445 ;...
+-36.7061 175.481 ;...
+-36.7879 175.759 ;...
+-37.0249 175.897 ;...
+-37.4263 176.01 ;...
+-37.8988 177.029 ;...
+-37.9645 177.258 ;...
+-37.9352 177.425 ;...
+-37.7054 177.737 ;...
+-37.5652 178.138 ;...
+-37.6606 178.467 ;...
+-37.8643 178.497 ;...
+-38.5343 178.314 ;...
+-38.613 177.84 ;...
+-38.9277 177.827 ;...
+-39.002 177.772 ;...
+-39.0448 177.244 ;...
+-39.1462 177.056 ;...
+-39.2825 176.934 ;...
+-39.4562 176.945 ;...
+-39.6931 177.062 ;...
+-39.7913 177.063 ;...
+-40.1345 176.703 ;...
+-41.1731 175.922 ;...
+-41.4572 175.581 ;...
+-41.6149 175.274 ;...
+-41.429 174.892 ;...
+-41.3135 174.765 ;...
+-40.5968 175.122 ;...
+-40.3534 175.297 ;...
+-40.1814 175.28 ;...
+-39.8067 174.807 ;...
+-39.4631 173.954 ;...
+-39.2614 173.773 ;...
+-39.1968 173.806 ;...
+-38.7129 174.566 ;...
+-38.4491 174.649 ;...
+-38.1028 174.61 ;...
+-37.943 174.644 ;...
+-37.7148 174.811 ;...
+-37.5232 174.822 ;...
+-37.1317 174.612 ;...
+-36.8796 174.652 ;...
+-36.7861 174.601 ;...
+-36.45 174.236 ;...
+-36.4031 174.288 ;...
+-36.1374 174.11 ;...
+-35.973 173.839 ;...
+-36.3185 174.107 ;...
+-36.3814 174.128 ;...
+-36.3778 174.053 ;...
+-35.3438 173.236 ;...
+-35.2917 173.237 ;...
+-35.1717 173.117 ;...
+-35.0383 173.121 ;...
+-34.7601 172.981 ;...
+-34.4376 172.636 ;...
+-34.4181 172.71 ;...
+-34.4634 172.802 ;...
+-34.4069 173.024 ;...
+-34.5353 173.062 ;...
+-34.6179 172.994 ;...
+-34.8604 173.171 ;...
+-34.9941 173.352 ;...
+-34.9273 173.36 ;...
+-34.9406 173.477 ;...
+-35.1597 174.026 ;...
+-35.3419 174.079 ;...
+-35.3551 174.163 ;...
+-35.2724 174.229 ;...
+-35.2856 174.313 ;...
+-35.3596 174.303 ;...
+-35.5221 174.405 ;...
+-35.7385 174.622 ;...
+-35.7506 174.546 ;...
+-35.8246 174.514 ;...
+-35.8987 174.553 ;...
+-35.8649 174.374 ;...
+-36.2331 174.79 ;...
+-36.3753 174.807 ;...
+-36.3667 174.699 ;...
+-36.5629 174.917 ;...
+-36.5539 174.606 ;...
+-36.592 174.596 ;...
+-36.6714 174.848 ;...
+-36.7241 174.795 ;...
+-36.8037 175.2 ;...
+-36.8564 175.123 ;...
+-36.9214 175.189 ;...
+NaN NaN ;...
+-45.92 166.5 ;...
+-45.42 166.83 ;...
+-45.08 167.08 ;...
+-44.75 167.58 ;...
+-44.42 167.92 ;...
+-44.08 168.33 ;...
+-44 168.83 ;...
+-43.67 169.5 ;...
+-43.33 170 ;...
+-43 170.5 ;...
+-42.67 171.08 ;...
+-42.25 171.33 ;...
+-41.83 171.5 ;...
+-41.67 172 ;...
+-41.33 172.17 ;...
+-40.92 172.17 ;...
+-40.67 172.58 ;...
+-40.67 172.58 ;...
+-40.92 173.08 ;...
+-41.33 173.17 ;...
+-41.17 173.5 ;...
+-40.92 173.83 ;...
+-41.25 173.92 ;...
+-41.33 174.33 ;...
+-41.83 174.33 ;...
+-42.17 174 ;...
+-42.58 173.58 ;...
+-43 173.25 ;...
+-43.17 172.83 ;...
+-43.5 172.75 ;...
+-43.75 173.08 ;...
+-43.75 172.42 ;...
+-44.08 172 ;...
+-44.33 171.42 ;...
+-44.67 171.25 ;...
+-45.08 171.08 ;...
+-45.5 170.92 ;...
+-45.92 170.58 ;...
+-46.25 170.17 ;...
+-46.5 169.67 ;...
+-46.67 169 ;...
+-46.5 168.33 ;...
+-46.42 167.83 ;...
+-46.17 167.58 ;...
+-46.25 167.25 ;...
+-46.17 166.75 ;...
+-45.92 166.5 ;...
+NaN NaN ;...
+-47.33 167.58 ;...
+-46.67 168 ;...
+-47 168.25 ;...
+-47.33 167.58 ;...
+NaN NaN ;...
+5.67 95.33 ;...
+5.67 95.92 ;...
+5.33 96.33 ;...
+5.33 96.92 ;...
+5.25 97.58 ;...
+4.92 98 ;...
+4.5 98.25 ;...
+4.08 98.58 ;...
+4.08 98.58 ;...
+3.75 99 ;...
+3.5 99.5 ;...
+3.17 100 ;...
+2.67 100.33 ;...
+2.17 100.75 ;...
+2.25 101.25 ;...
+1.75 101.75 ;...
+1.67 102.25 ;...
+1.17 102.67 ;...
+1.08 103.08 ;...
+0.58 103 ;...
+0.58 103.42 ;...
+0.17 103.83 ;...
+-0.25 103.75 ;...
+-0.67 103.5 ;...
+-1 103.83 ;...
+-1 104.42 ;...
+-1.67 104.58 ;...
+-1.92 104.92 ;...
+-2.33 105.08 ;...
+-2.33 105.58 ;...
+-2.67 105.92 ;...
+-3 106.17 ;...
+-3.5 105.92 ;...
+-4.17 105.92 ;...
+-4.67 105.92 ;...
+-5.25 105.83 ;...
+-5.75 105.75 ;...
+-5.5 105.33 ;...
+-5.5 104.92 ;...
+-5.5 104.92 ;...
+-5.75 104.75 ;...
+-5.58 104.42 ;...
+-5.08 104 ;...
+-4.75 103.5 ;...
+-4.25 102.83 ;...
+-3.83 102.42 ;...
+-3.5 102.25 ;...
+-3.17 101.75 ;...
+-2.58 101.33 ;...
+-2.08 100.92 ;...
+-1.67 100.83 ;...
+-1.08 100.5 ;...
+-0.67 100.42 ;...
+-0.25 100 ;...
+0.08 99.83 ;...
+0.33 99.25 ;...
+1 99.08 ;...
+1.5 98.83 ;...
+1.83 98.83 ;...
+2.17 98.33 ;...
+2.42 97.83 ;...
+2.92 97.67 ;...
+3.33 97.25 ;...
+3.75 96.92 ;...
+3.83 96.58 ;...
+4.33 96.08 ;...
+4.75 95.67 ;...
+5.17 95.42 ;...
+5.67 95.33 ;...
+NaN NaN ;...
+-6.67 105.5 ;...
+-6.33 105.83 ;...
+-5.83 106.08 ;...
+-6 106.67 ;...
+-5.83 107.08 ;...
+-5.92 107.42 ;...
+-6.17 107.92 ;...
+-6.25 108.42 ;...
+-6.67 108.67 ;...
+-6.75 109.08 ;...
+-6.75 109.58 ;...
+-6.83 110.08 ;...
+-6.83 110.5 ;...
+-6.33 110.83 ;...
+-6.33 110.83 ;...
+-6.58 111.33 ;...
+-6.75 111.92 ;...
+-6.83 112.42 ;...
+-7.08 112.75 ;...
+-7.5 112.83 ;...
+-7.67 113.42 ;...
+-7.58 113.92 ;...
+-7.67 114.42 ;...
+-8.17 114.42 ;...
+-8.58 114.42 ;...
+-8.5 114 ;...
+-8.33 113.58 ;...
+-8.17 113.08 ;...
+-8.33 112.5 ;...
+-8.17 112 ;...
+-8.17 111.5 ;...
+-8.08 111 ;...
+-8 110.5 ;...
+-7.75 110 ;...
+-7.67 109.58 ;...
+-7.67 109 ;...
+-7.75 108.58 ;...
+-7.67 108 ;...
+-7.42 107.58 ;...
+-7.33 107 ;...
+-7.25 106.5 ;...
+-7 106.58 ;...
+-6.75 106 ;...
+-6.67 105.5 ;...
+NaN NaN ;...
+-8.74 115.25 ;...
+-8.39 114.7 ;...
+-8.25 114.66 ;...
+-8.17 115.25 ;...
+-8.41 115.66 ;...
+-8.74 115.25 ;...
+NaN NaN ;...
+-8.41105 118.028 ;...
+-8.31514 117.8 ;...
+-8.2385 117.771 ;...
+-8.14112 117.782 ;...
+-8.04694 117.892 ;...
+-8.02632 118.029 ;...
+-8.20451 118.21 ;...
+-8.13412 118.341 ;...
+-8.17755 118.491 ;...
+-8.2455 118.538 ;...
+-8.4523 118.538 ;...
+-8.24048 118.684 ;...
+-8.22294 118.823 ;...
+-8.32303 118.924 ;...
+-8.5166 118.938 ;...
+-8.49766 119.017 ;...
+-8.54485 119.045 ;...
+-8.72669 119.038 ;...
+-8.63485 118.898 ;...
+-8.61236 118.762 ;...
+-8.64094 118.701 ;...
+-8.71093 118.735 ;...
+-8.79438 118.364 ;...
+-8.75479 118.301 ;...
+-8.63076 118.34 ;...
+-8.60522 118.303 ;...
+-8.85262 118.074 ;...
+-8.83527 117.944 ;...
+-9.04628 117.402 ;...
+-9.02781 117.245 ;...
+-9.08827 117.111 ;...
+-9.06164 116.94 ;...
+-8.97739 116.804 ;...
+-8.85019 116.774 ;...
+-8.72887 116.833 ;...
+-8.61456 116.795 ;...
+-8.42958 117.078 ;...
+-8.49826 117.619 ;...
+-8.62511 117.661 ;...
+-8.6928 117.918 ;...
+-8.57196 117.998 ;...
+-8.5627 118.124 ;...
+-8.47663 118.144 ;...
+-8.41105 118.028 ;...
+NaN NaN ;...
+-8.21715 123.122 ;...
+-8.37718 122.814 ;...
+-8.48047 122.829 ;...
+-8.78892 122.153 ;...
+-8.77218 122.011 ;...
+-8.9188 121.82 ;...
+-8.84247 121.427 ;...
+-8.9683 121.28 ;...
+-8.9854 121.14 ;...
+-8.77019 120.61 ;...
+-8.75375 119.975 ;...
+-8.49831 119.877 ;...
+-8.38806 119.886 ;...
+-8.29203 120.052 ;...
+-8.21221 120.365 ;...
+-8.26389 120.668 ;...
+-8.51063 121.246 ;...
+-8.648 121.429 ;...
+-8.65238 121.572 ;...
+-8.51884 121.709 ;...
+-8.47615 122.086 ;...
+-8.55709 122.26 ;...
+-8.50313 122.457 ;...
+-8.40507 122.426 ;...
+-8.17275 122.843 ;...
+-8.09668 122.896 ;...
+-8.04963 122.857 ;...
+-8.07447 122.757 ;...
+-7.97545 122.823 ;...
+-7.99363 123.027 ;...
+-8.08062 123.119 ;...
+-8.21715 123.122 ;...
+NaN NaN ;...
+-8.4803 123.557 ;...
+-8.37069 123.375 ;...
+-8.25229 123.526 ;...
+-8.24991 123.865 ;...
+-8.35135 123.887 ;...
+-8.4803 123.557 ;...
+NaN NaN ;...
+-8.88507 116.415 ;...
+-8.82908 115.959 ;...
+-8.68125 116.126 ;...
+-8.48053 116.133 ;...
+-8.32835 116.236 ;...
+-8.28269 116.442 ;...
+-8.37907 116.656 ;...
+-8.88507 116.415 ;...
+NaN NaN ;...
+-8.42 124.33 ;...
+-8.08 124.5 ;...
+-8.1226 124.667 ;...
+-8.16514 124.835 ;...
+-8.2076 125.002 ;...
+-8.25 125.17 ;...
+-8.29267 124.96 ;...
+-8.33522 124.75 ;...
+-8.37767 124.54 ;...
+-8.42 124.33 ;...
+NaN NaN ;...
+-7.92 125.83 ;...
+-7.58 126 ;...
+-7.5 126.67 ;...
+-7.92 126.42 ;...
+-7.92 125.83 ;...
+NaN NaN ;...
+-9.42 119 ;...
+-9.33 119.5 ;...
+-9.33 120 ;...
+-9.58 120.5 ;...
+-10 120.92 ;...
+-10.25 120.42 ;...
+-9.92 120 ;...
+-9.75 119.67 ;...
+-9.75 119.17 ;...
+-9.42 119 ;...
+NaN NaN ;...
+-10.33 123.5 ;...
+-9.67 123.67 ;...
+-9.33 123.92 ;...
+-9.17 124.33 ;...
+-9 124.83 ;...
+-8.67 125.08 ;...
+-8.5 125.67 ;...
+-8.42 126.25 ;...
+-8.33 126.75 ;...
+-8.33 127.25 ;...
+-8.67 126.92 ;...
+-8.92 126.42 ;...
+-9.08 125.92 ;...
+-9.25 125.42 ;...
+-9.58 124.92 ;...
+-10.08 124.5 ;...
+-10.25 124 ;...
+-10.33 123.5 ;...
+NaN NaN ;...
+-7.92 131.17 ;...
+-7.42 131.25 ;...
+-7.08 131.58 ;...
+-7.58 131.58 ;...
+-7.92 131.17 ;...
+NaN NaN ;...
+-6.92 134.08 ;...
+-6.33 134.08 ;...
+-5.83 134.25 ;...
+-5.42 134.5 ;...
+-5.92 134.67 ;...
+-6.5 134.5 ;...
+-6.92 134.08 ;...
+NaN NaN ;...
+-1.92 105.25 ;...
+-1.58 105.5 ;...
+-1.5 106 ;...
+-2 106.25 ;...
+-2.42 106.42 ;...
+-2.5 106.75 ;...
+-3 106.67 ;...
+-2.62 106 ;...
+-2.25 106 ;...
+-2 105.75 ;...
+-1.92 105.25 ;...
+NaN NaN ;...
+-3.08 107.67 ;...
+-2.5 107.75 ;...
+-2.67 108.33 ;...
+-3.17 108.08 ;...
+-3.08 107.67 ;...
+NaN NaN ;...
+-7.17947 113.059 ;...
+-7.13004 112.93 ;...
+-6.98004 112.84 ;...
+-6.90381 113.003 ;...
+-6.92181 113.142 ;...
+-6.85527 113.795 ;...
+-6.89199 113.951 ;...
+-7.16353 113.639 ;...
+-7.17947 113.059 ;...
+NaN NaN ;...
+1 108.92 ;...
+1.58 109.08 ;...
+2 109.5 ;...
+1.75 110 ;...
+1.67 110.58 ;...
+1.58 111.08 ;...
+2.25 111.33 ;...
+2.83 111.5 ;...
+2.92 112 ;...
+3 112.58 ;...
+3.33 113.08 ;...
+3.75 113.33 ;...
+4.08 113.75 ;...
+4.08 113.75 ;...
+4.67 114.25 ;...
+4.92 114.75 ;...
+4.92 115.42 ;...
+5.33 115.42 ;...
+5.83 116 ;...
+6.25 116.17 ;...
+6.58 116.67 ;...
+7 117 ;...
+6.75 117.25 ;...
+6.42 117.75 ;...
+5.92 117.75 ;...
+5.75 118.25 ;...
+5.42 118.75 ;...
+5.17 119.25 ;...
+5 118.75 ;...
+4.83 118.25 ;...
+4.42 118.58 ;...
+4.25 117.92 ;...
+3.67 117.83 ;...
+3.25 117.5 ;...
+2.75 117.75 ;...
+2.33 118.08 ;...
+2 117.83 ;...
+1.58 118.25 ;...
+1.25 118.67 ;...
+0.92 119 ;...
+0.83 118.42 ;...
+0.83 117.92 ;...
+0.5 117.67 ;...
+0 117.5 ;...
+0 117.5 ;...
+-0.58 117.42 ;...
+-1 117.17 ;...
+-1.33 116.75 ;...
+-1.75 116.33 ;...
+-2.25 116.67 ;...
+-2.92 116.33 ;...
+-3.42 116.17 ;...
+-3.67 115.67 ;...
+-3.92 115.17 ;...
+-4.08 114.75 ;...
+-3.67 114.67 ;...
+-3.33 114.33 ;...
+-3.42 113.83 ;...
+-3.08 113.25 ;...
+-3.33 112.83 ;...
+-3.33 112.33 ;...
+-3.5 111.83 ;...
+-2.75 111.75 ;...
+-2.92 111.42 ;...
+-3 110.92 ;...
+-2.92 110.33 ;...
+-2.33 110.25 ;...
+-1.75 110.08 ;...
+-1.25 110.08 ;...
+-0.75 109.75 ;...
+-0.42 109.25 ;...
+0.08 109.25 ;...
+0.42 109 ;...
+1 108.92 ;...
+NaN NaN ;...
+-2.83 118.83 ;...
+-2.42 119.17 ;...
+-1.92 119.33 ;...
+-1.33 119.33 ;...
+-0.92 119.5 ;...
+-0.58 119.83 ;...
+-0.08 119.83 ;...
+0.42 119.92 ;...
+0.75 120 ;...
+0.75 120.67 ;...
+1.33 120.92 ;...
+1.17 121.42 ;...
+1.08 122 ;...
+1 122.5 ;...
+0.83 122.92 ;...
+0.92 123.33 ;...
+0.92 123.92 ;...
+1.17 124.42 ;...
+1.5 124.75 ;...
+1.75 125 ;...
+1.75 125 ;...
+1.5 125.25 ;...
+1.08 125 ;...
+0.58 124.58 ;...
+0.42 124 ;...
+0.33 123.5 ;...
+0.5 123 ;...
+0.5 122.5 ;...
+0.5 122 ;...
+0.5 121.42 ;...
+0.42 121 ;...
+0.5 120.67 ;...
+0.42 120.33 ;...
+-0.08 120.08 ;...
+-0.67 120.08 ;...
+-0.92 120.58 ;...
+-1.33 120.67 ;...
+-1.33 121.17 ;...
+-0.92 121.58 ;...
+-0.92 122.08 ;...
+-0.75 122.58 ;...
+-0.58 123.17 ;...
+-0.92 123.5 ;...
+-0.92 122.92 ;...
+-1.33 122.5 ;...
+-1.67 122.08 ;...
+-1.92 121.5 ;...
+-2.42 121.92 ;...
+-2.75 122.17 ;...
+-3.17 122.42 ;...
+-3.5 122.08 ;...
+-3.5 122.08 ;...
+-3.83 122.5 ;...
+-4.33 122.83 ;...
+-4.75 123.17 ;...
+-5.33 123.17 ;...
+-5.67 122.67 ;...
+-5.33 122.33 ;...
+-4.58 122.33 ;...
+-4.83 122 ;...
+-4.67 121.58 ;...
+-4.08 121.67 ;...
+-3.83 121.17 ;...
+-3.42 120.92 ;...
+-3 121.08 ;...
+-2.67 121.08 ;...
+-2.67 120.67 ;...
+-3 120.25 ;...
+-3.5 120.42 ;...
+-4 120.42 ;...
+-4.5 120.42 ;...
+-5 120.33 ;...
+-5.5 120.42 ;...
+-5.58 119.92 ;...
+-5.58 119.5 ;...
+-5.17 119.42 ;...
+-4.58 119.58 ;...
+-4 119.67 ;...
+-3.5 119.5 ;...
+-3.5 119 ;...
+-2.83 118.83 ;...
+NaN NaN ;...
+1.08 127.33 ;...
+1.5 127.5 ;...
+1.92 127.58 ;...
+2.17 128 ;...
+1.75 127.83 ;...
+1.33 128.08 ;...
+1.58 128.67 ;...
+1.08 128.67 ;...
+0.83 128.25 ;...
+0.5 128.5 ;...
+0.5 128 ;...
+0 127.92 ;...
+-0.42 128 ;...
+-0.92 128.42 ;...
+-0.58 127.92 ;...
+-0.17 127.58 ;...
+0.25 127.67 ;...
+0.67 127.58 ;...
+0.772502 127.518 ;...
+0.875002 127.455 ;...
+0.977502 127.393 ;...
+1.08 127.33 ;...
+NaN NaN ;...
+-3.25 126 ;...
+-3.08 126.5 ;...
+-3.17 127 ;...
+-3.58 127.17 ;...
+-3.83 126.83 ;...
+-3.67 126.33 ;...
+-3.25 126 ;...
+NaN NaN ;...
+-3.17 127.92 ;...
+-2.83 128.25 ;...
+-2.83 128.92 ;...
+-2.75 129.5 ;...
+-3 129.92 ;...
+-3 130.42 ;...
+-3.42 130.75 ;...
+-3.83 130.75 ;...
+-3.58 130.25 ;...
+-3.42 129.42 ;...
+-3.33 128.92 ;...
+-3.42 128.5 ;...
+-3.17 127.92 ;...
+NaN NaN ;...
+-1.97171 124.616 ;...
+-1.93597 124.508 ;...
+-1.73058 124.549 ;...
+-1.68383 124.702 ;...
+-1.78354 125.191 ;...
+-1.89915 125.372 ;...
+-1.97171 124.616 ;...
+NaN NaN ;...
+-1.67 127.33 ;...
+-1.33 127.58 ;...
+-1.67 128.17 ;...
+-1.67003 127.96 ;...
+-1.67004 127.75 ;...
+-1.67003 127.54 ;...
+-1.67 127.33 ;...
+NaN NaN ;...
+-0.17 130.33 ;...
+0 130.75 ;...
+-0.25 131.25 ;...
+-0.42 130.75 ;...
+-0.17 130.33 ;...
+NaN NaN ;...
+-0.67 135.33 ;...
+-0.67 135.92 ;...
+-1.08 136.25 ;...
+-1.17 135.83 ;...
+-0.67 135.33 ;...
+NaN NaN ;...
+-1.33 130.83 ;...
+-0.83 131.25 ;...
+-0.83 131.25 ;...
+-0.67 131.83 ;...
+-0.33 132.17 ;...
+-0.33 132.83 ;...
+-0.67 133.25 ;...
+-0.67 133.92 ;...
+-1.33 134.17 ;...
+-1.83 134 ;...
+-2.33 134.17 ;...
+-2.5 134.58 ;...
+-3 134.75 ;...
+-3.33 135.17 ;...
+-3.33 135.58 ;...
+-2.92 135.83 ;...
+-2.58 136.08 ;...
+-2.17 136.5 ;...
+-2.08 137 ;...
+-1.67 137.25 ;...
+-1.42 137.92 ;...
+-1.67 138.42 ;...
+-2 139 ;...
+-2.17 139.5 ;...
+-2.33 140 ;...
+-2.33 140.42 ;...
+-2.58 141 ;...
+-2.75 141.5 ;...
+-2.92 142 ;...
+-3.08 142.42 ;...
+-3.33 143 ;...
+-3.42 143.5 ;...
+-3.67 144 ;...
+-3.67 144 ;...
+-3.92 144.5 ;...
+-4.25 145 ;...
+-4.5 145.42 ;...
+-4.92 145.75 ;...
+-5.5 145.75 ;...
+-5.58 146.25 ;...
+-5.92 146.83 ;...
+-5.83 147.42 ;...
+-6.17 147.83 ;...
+-6.67 147.83 ;...
+-6.75 147.42 ;...
+-6.83 147 ;...
+-7.25 147.17 ;...
+-7.75 147.67 ;...
+-8 148.17 ;...
+-8.58 148.25 ;...
+-9.08 148.58 ;...
+-9 149.25 ;...
+-9.42 149.08 ;...
+-9.58 149.58 ;...
+-9.58005 149.685 ;...
+-9.58006 149.79 ;...
+-9.58005 149.895 ;...
+-9.58 150 ;...
+-9.66502 149.938 ;...
+-9.75002 149.875 ;...
+-9.83502 149.813 ;...
+-9.92 149.75 ;...
+-9.96005 149.855 ;...
+-10.0001 149.96 ;...
+-10.04 150.065 ;...
+-10.08 150.17 ;...
+-10.17 150.75 ;...
+-10.58 150.58 ;...
+-10.67 150 ;...
+-10.33 149.75 ;...
+-10.25 149.25 ;...
+-10.17 148.67 ;...
+-10.08 148.08 ;...
+-10.08 148.08 ;...
+-10 147.58 ;...
+-9.67 147.42 ;...
+-9.33 147 ;...
+-8.92 146.5 ;...
+-8.5 146.25 ;...
+-8 146 ;...
+-7.92 145.58 ;...
+-7.67 145 ;...
+-7.5 144.67 ;...
+-7.5 144.17 ;...
+-7.92 143.83 ;...
+-8.17 143.58 ;...
+-8.33 143.17 ;...
+-8.75 143.33 ;...
+-9 143.17 ;...
+-9.25 142.58 ;...
+-9.08 142 ;...
+-9.08 141.5 ;...
+-9 141 ;...
+-8.67 140.58 ;...
+-8.33 140.25 ;...
+-8 140 ;...
+-8.08 139.42 ;...
+-8.08 138.83 ;...
+-8.33 138.25 ;...
+-8.33 137.67 ;...
+-7.75 138 ;...
+-7.33 138.25 ;...
+-7.25 138.75 ;...
+-6.75 138.58 ;...
+-6.75 138.58 ;...
+-6.17 138.42 ;...
+-5.58 138.17 ;...
+-5.25 137.83 ;...
+-5 137.33 ;...
+-4.83 136.75 ;...
+-4.58 136.17 ;...
+-4.42 135.58 ;...
+-4.33 135 ;...
+-4.08 134.58 ;...
+-3.83 134.08 ;...
+-3.5 133.67 ;...
+-4 133.25 ;...
+-4 132.83 ;...
+-3.58 132.75 ;...
+-3.25 132.67 ;...
+-2.92 132.25 ;...
+-2.83 131.92 ;...
+-2.67 132.17 ;...
+-2.75 132.67 ;...
+-2.5 133.08 ;...
+-2.5 133.67 ;...
+-2 133.83 ;...
+-2.17 133.25 ;...
+-2.25 132.75 ;...
+-2.25 132.25 ;...
+-2 131.83 ;...
+-1.58 131.83 ;...
+-1.5 131.42 ;...
+-1.33 130.83 ;...
+NaN NaN ;...
+16.25 119.83 ;...
+16.08 120.33 ;...
+16.58 120.33 ;...
+17.17 120.5 ;...
+17.58 120.33 ;...
+18 120.5 ;...
+18.5 120.67 ;...
+18.58 121.08 ;...
+18.33 121.5 ;...
+18.25 121.92 ;...
+18.5 122.33 ;...
+17.92 122.17 ;...
+17.42 122.17 ;...
+17.3351 122.253 ;...
+17.2501 122.335 ;...
+17.1651 122.418 ;...
+17.08 122.5 ;...
+16.955 122.457 ;...
+16.83 122.415 ;...
+16.705 122.372 ;...
+16.58 122.33 ;...
+16.17 122 ;...
+15.75 121.58 ;...
+15.75 121.58 ;...
+15.25 121.42 ;...
+14.58 121.67 ;...
+14.08 121.83 ;...
+13.92 122.25 ;...
+14.17 122.58 ;...
+14.17 122.92 ;...
+14 123.33 ;...
+13.75 123.92 ;...
+13.67 123.58 ;...
+13.33 123.75 ;...
+12.92 124.17 ;...
+12.83 123.83 ;...
+13 123.42 ;...
+13.58 123.08 ;...
+13.83 122.58 ;...
+13.5 122.67 ;...
+13.08 122.67 ;...
+13.67 122.17 ;...
+13.92 121.75 ;...
+13.58 121.25 ;...
+13.75 120.67 ;...
+14.17 120.67 ;...
+14.5 121 ;...
+14.75 120.67 ;...
+14.42 120.58 ;...
+14.75 120.08 ;...
+15.25 120 ;...
+15.67 119.92 ;...
+16.25 119.83 ;...
+NaN NaN ;...
+8.42 117.25 ;...
+8.92 117.5 ;...
+9.25 117.92 ;...
+9.67 118.42 ;...
+10 118.75 ;...
+10.5 119.17 ;...
+11.25 119.5 ;...
+10.5 119.67 ;...
+10.08 119.17 ;...
+9.92 118.83 ;...
+9.42 118.58 ;...
+9.08 118.17 ;...
+8.75 117.92 ;...
+8.42 117.25 ;...
+NaN NaN ;...
+13.42 120.42 ;...
+13.42 120.83 ;...
+13.33 121.25 ;...
+13 121.58 ;...
+12.5 121.58 ;...
+12.17 121.17 ;...
+12.5 120.92 ;...
+13 120.75 ;...
+13.42 120.42 ;...
+NaN NaN ;...
+12.5 124.33 ;...
+12.42 124.75 ;...
+12.5 125.25 ;...
+12.08 125.5 ;...
+11.67 125.5 ;...
+11.08 125.67 ;...
+11.08 125 ;...
+10.67 125 ;...
+10.25 125.25 ;...
+10.17 124.75 ;...
+10.75 124.75 ;...
+10.83 124.42 ;...
+11.42 124.42 ;...
+11.33 124.83 ;...
+11.435 124.872 ;...
+11.54 124.915 ;...
+11.645 124.957 ;...
+11.75 125 ;...
+11.8126 124.875 ;...
+11.8751 124.75 ;...
+11.9376 124.625 ;...
+12 124.5 ;...
+12.5 124.33 ;...
+NaN NaN ;...
+10.5 121.92 ;...
+10.92 122 ;...
+11.42 122.08 ;...
+11.83 121.92 ;...
+11.75 122.5 ;...
+11.5 122.58 ;...
+11.5 123.17 ;...
+10.92 123.08 ;...
+10.83 123.5 ;...
+10.42 123.42 ;...
+10.67 123.83 ;...
+11.08 124 ;...
+10.58 124 ;...
+10.17 124 ;...
+10.08 124.5 ;...
+9.67 124.5 ;...
+9.58 124 ;...
+9.92 123.75 ;...
+9.58 123.5 ;...
+9.75 123.17 ;...
+9.25 123.33 ;...
+9 123 ;...
+9.33 122.67 ;...
+9.43502 122.608 ;...
+9.54002 122.545 ;...
+9.64502 122.483 ;...
+9.75 122.42 ;...
+9.81255 122.522 ;...
+9.87506 122.625 ;...
+9.93755 122.727 ;...
+10 122.83 ;...
+10.42 122.83 ;...
+10.58 122.5 ;...
+10.5 121.92 ;...
+NaN NaN ;...
+7.17 122 ;...
+7.75 122.08 ;...
+8 122.33 ;...
+8.17 122.83 ;...
+8.5 123.08 ;...
+8.67 123.5 ;...
+8.5 123.83 ;...
+8.08 123.92 ;...
+8.42 124.25 ;...
+8.5 124.67 ;...
+9 124.83 ;...
+8.83 125.17 ;...
+9.08 125.5 ;...
+9.75 125.42 ;...
+9.75 125.42 ;...
+9.33 126 ;...
+8.83 126.33 ;...
+8.25 126.33 ;...
+7.67 126.58 ;...
+7.25 126.58 ;...
+6.83 126.33 ;...
+6.33 126.17 ;...
+6.92 126 ;...
+7.33 125.75 ;...
+7 125.5 ;...
+6.75 125.33 ;...
+6.42 125.58 ;...
+6.08 125.75 ;...
+5.58 125.42 ;...
+5.92 125.08 ;...
+5.92 124.75 ;...
+6.17 124.33 ;...
+6.42 124 ;...
+7 124 ;...
+7.33 124.25 ;...
+7.58 124 ;...
+7.75 123.58 ;...
+7.42 123.25 ;...
+7.33 122.92 ;...
+7.75 122.83 ;...
+7.67 122.5 ;...
+7.25 122.25 ;...
+6.83 122.08 ;...
+7.17 122 ;...
+NaN NaN ;...
+-5.5 148.25 ;...
+-5.58 148.83 ;...
+-5.58 149.42 ;...
+-5.5 150 ;...
+-5.5 150.42 ;...
+-5.5 150.83 ;...
+-4.92 151.25 ;...
+-4.92 151.67 ;...
+-4.17 151.5 ;...
+-4.17 152.25 ;...
+-4.67 152.42 ;...
+-5 152.08 ;...
+-5.58 152 ;...
+-5.58 151.58 ;...
+-6 151.17 ;...
+-6.25 150.67 ;...
+-6.25 150.17 ;...
+-6.25 149.67 ;...
+-6.17 149.17 ;...
+-5.83 148.83 ;...
+-5.75 148.33 ;...
+-5.5 148.25 ;...
+NaN NaN ;...
+-2.21577 147.198 ;...
+-2.18725 146.719 ;...
+-2.13789 146.596 ;...
+-1.98672 146.757 ;...
+-1.966 147.092 ;...
+-2.07707 147.366 ;...
+-2.21577 147.198 ;...
+NaN NaN ;...
+-3.73635 152.468 ;...
+-4.16656 153.061 ;...
+-4.28378 153.092 ;...
+-4.60661 153.049 ;...
+-4.74546 152.955 ;...
+-4.78954 152.881 ;...
+-4.55773 152.723 ;...
+-4.25507 152.721 ;...
+-3.73635 152.468 ;...
+NaN NaN ;...
+-3.45722 152.022 ;...
+-3.45088 151.938 ;...
+-3.04936 151.304 ;...
+-2.83616 151.072 ;...
+-2.76105 151.146 ;...
+-3.10603 151.622 ;...
+-3.35671 152.067 ;...
+-3.43183 152.084 ;...
+-3.45722 152.022 ;...
+NaN NaN ;...
+-5.42 154.67 ;...
+-5.5 155.08 ;...
+-6.17 155.58 ;...
+-6.58 156 ;...
+-6.83 155.5 ;...
+-6.5 155.17 ;...
+-6 154.83 ;...
+-5.42 154.67 ;...
+NaN NaN ;...
+-7.30804 157.408 ;...
+-7.34627 157.305 ;...
+-7.26192 157.033 ;...
+-6.79431 156.488 ;...
+-6.67686 156.479 ;...
+-6.82917 156.886 ;...
+-7.11623 157.139 ;...
+-7.30804 157.408 ;...
+NaN NaN ;...
+-8.53103 157.849 ;...
+-8.51261 157.698 ;...
+-8.33216 157.552 ;...
+-8.29246 157.462 ;...
+-8.29963 157.3 ;...
+-8.02901 157.362 ;...
+-7.96993 157.437 ;...
+-7.99715 157.57 ;...
+-8.41673 157.885 ;...
+-8.53103 157.849 ;...
+NaN NaN ;...
+-8.47312 159.783 ;...
+-7.88709 158.726 ;...
+-7.67924 158.554 ;...
+-7.55889 158.606 ;...
+-7.96476 159.168 ;...
+-8.26925 159.741 ;...
+-8.35814 159.839 ;...
+-8.47312 159.783 ;...
+NaN NaN ;...
+-9.51514 161.285 ;...
+-9.1631 160.89 ;...
+-8.92188 160.834 ;...
+-8.57258 160.668 ;...
+-8.47113 160.678 ;...
+-8.45288 160.803 ;...
+-8.53599 160.904 ;...
+-9.51514 161.285 ;...
+NaN NaN ;...
+-9.25 159.5 ;...
+-9.33 160.25 ;...
+-9.67 160.83 ;...
+-9.92 160.5 ;...
+-9.83 160.08 ;...
+-9.83 159.67 ;...
+-9.5 159.42 ;...
+-9.25 159.5 ;...
+NaN NaN ;...
+-10.17 161.25 ;...
+-10.33 162 ;...
+-10.75 162.33 ;...
+-10.67 161.75 ;...
+-10.17 161.25 ;...
+NaN NaN ;...
+-15.6618 166.776 ;...
+-14.7996 166.559 ;...
+-14.7171 166.578 ;...
+-14.8255 166.713 ;...
+-15.1995 166.845 ;...
+-15.1803 166.986 ;...
+-14.9903 167.063 ;...
+-15.4712 167.238 ;...
+-15.5345 167.2 ;...
+-15.5856 166.913 ;...
+-15.6618 166.776 ;...
+NaN NaN ;...
+-15.92 167.33 ;...
+-16.42 167.92 ;...
+-16.5 167.5 ;...
+-15.92 167.33 ;...
+NaN NaN ;...
+-20.25 164.25 ;...
+-20.5 164.75 ;...
+-20.75 165.33 ;...
+-21.17 165.67 ;...
+-21.5 166 ;...
+-21.67 166.5 ;...
+-22 166.83 ;...
+-22.33 167.17 ;...
+-22.33 166.67 ;...
+-22 166.17 ;...
+-21.75 165.75 ;...
+-21.5 165.42 ;...
+-21.25 165 ;...
+-20.83 164.58 ;...
+-20.25 164.25 ;...
+NaN NaN ;...
+-43.77 -176.83 ;...
+-43.8 -176.17 ;...
+-44.17 -176.58 ;...
+-43.9 -176.5 ;...
+-43.77 -176.83 ;...
+NaN NaN ;...
+-18 177.33 ;...
+-17.58 177.42 ;...
+-17.33 177.75 ;...
+-17.33 178.25 ;...
+-17.58 178.67 ;...
+-18 178.67 ;...
+-18.17 178.17 ;...
+-18.17 177.75 ;...
+-18 177.33 ;...
+NaN NaN ;...
+-16.58 178.58 ;...
+-16.42 179.17 ;...
+-16.17 179.83 ;...
+-16.5 179.67 ;...
+-16.67 179.92 ;...
+-16.67 179.33 ;...
+-16.92 178.75 ;...
+-16.58 178.58 ;...
+NaN NaN ;...
+-17.47 -149.58 ;...
+-17.53 -149.33 ;...
+-17.83 -149.17 ;...
+-17.73 -149.5 ;...
+-17.47 -149.58 ;...
+NaN NaN ;...
+22.03 -159.82 ;...
+22.22 -159.58 ;...
+22.22 -159.3 ;...
+21.9 -159.38 ;...
+21.9 -159.62 ;...
+22.03 -159.82 ;...
+NaN NaN ;...
+21.58 -158.28 ;...
+21.72 -157.98 ;...
+21.3 -157.67 ;...
+21.3 -158.12 ;...
+21.58 -158.28 ;...
+NaN NaN ;...
+21.2134 -157.213 ;...
+21.1608 -156.716 ;...
+21.0424 -156.858 ;...
+21.1017 -157.3 ;...
+21.2134 -157.213 ;...
+NaN NaN ;...
+20.95 -156.68 ;...
+20.9 -156.47 ;...
+20.97 -156.28 ;...
+20.73 -155.98 ;...
+20.62 -156.2 ;...
+20.57 -156.43 ;...
+20.78 -156.47 ;...
+20.95 -156.68 ;...
+NaN NaN ;...
+20.27 -155.88 ;...
+20.15 -155.63 ;...
+20.05 -155.37 ;...
+19.92 -155.13 ;...
+19.68 -155.02 ;...
+19.52 -154.8 ;...
+19.33 -155 ;...
+19.27 -155.3 ;...
+19.13 -155.52 ;...
+18.93 -155.65 ;...
+19.08 -155.92 ;...
+19.35 -155.88 ;...
+19.77 -156.05 ;...
+19.95 -155.87 ;...
+20.27 -155.88 ];
+
+coastlat=coast(:,1);
+coastlon=coast(:,2);
+
+%}}}
 if nargin==2, 
 	if strcmpi(class(varargin{1}),'pairoptions'),
@@ -10,11 +9881,6 @@
 end
 
-%retrieve some variables: 
-
-
-load ./Data/coastlines
 [x,y,z]=AboveGround(coastlat,coastlon,mesh.r(1),1000);
 hold on, p=plot3(x,y,z,'k-'); 
 set(p,'Color',getfieldvalue(options,'coast_color','k'));
 set(p,'LineWidth',getfieldvalue(options,'coast_linewidth',1));
-
Index: /issm/trunk/src/m/plot/plot_contour.py
===================================================================
--- /issm/trunk/src/m/plot/plot_contour.py	(revision 24685)
+++ /issm/trunk/src/m/plot/plot_contour.py	(revision 24686)
@@ -34,5 +34,5 @@
     norm = options.getfieldvalue('colornorm')
     colors = options.getfieldvalue('contourcolors', 'y')
-    linestyles = options.getfieldvalue('contourlinestyles', ' - ')
+    linestyles = options.getfieldvalue('contourlinestyles', '-')
     linewidths = options.getfieldvalue('contourlinewidths', 1)
 
Index: /issm/trunk/src/m/plot/plot_landsat.m
===================================================================
--- /issm/trunk/src/m/plot/plot_landsat.m	(revision 24686)
+++ /issm/trunk/src/m/plot/plot_landsat.m	(revision 24686)
@@ -0,0 +1,152 @@
+% Explain
+%  This function loads the Landsat Image Mosaic of Antarctica (LIMA) for the background image.
+%
+% Usage
+%  plot_landsat(md,data,options,plotlines,plotcols,i),
+%
+function plot_landsat(md,data,options,plotlines,plotcols,i),
+
+%process mesh and data
+[x2d y2d z2d elements2d is2d isplanet]=processmesh(md,[],options);
+[data datatype]=processdata(md,data,options);
+
+%check is2d
+if ~is2d,
+   error('buildgridded error message: gridded not supported for 3d meshes, project on a layer');
+end
+
+%Get some options
+transparency = getfieldvalue(options,'transparency',.3);
+highres =  getfieldvalue(options,'highres',0);
+
+% get xlim, and ylim
+xlim=getfieldvalue(options,'xlim',[min(x2d) max(x2d)])/getfieldvalue(options,'unit',1);
+ylim=getfieldvalue(options,'ylim',[min(y2d) max(y2d)])/getfieldvalue(options,'unit',1);
+
+if md.mesh.epsg == 3031 & (isempty(md.radaroverlay.pwr) | isempty(md.radaroverlay.x) | isempty(md.radaroverlay.y) | length(size(md.radaroverlay.pwr)) < 3), % Antarctica region {{{
+	if highres, 
+		disp('   LIMA with geotiff'), % {{{
+		disp('WARNING : this image shoud be collected with geocoded tif file');
+		% find merged mosaic landsat image {{{
+		limapath = {'/drive/project_inwoo/issm/Data/LIMA/AntarcticaLandsat.tif'};
+		pos = zeros(length(limapath),1);
+		for ii = 1:length(limapath)
+			if exist(limapath{ii}), pos(ii) = 1; end
+		end
+		limapath = limapath{find(pos)};
+		fprintf('   LIMA path is %s\n', limapath);
+		% }}}
+
+		% read image
+		im = imread(limapath);
+
+		% Region of LIMA data set
+		info = gdalinfo(limapath); % get geotiff info
+		xm = info.xmin + info.dx*[0:info.nx-1];
+		ym = info.ymax - info.dy*[0:info.ny-1];
+
+		% find region of model at LIMA
+		offset = 1e+4;
+		posx = find((xm > xlim(1)-offset).* (xm < xlim(2)+offset));
+		posy = find((ym > ylim(1)-offset).* (ym < ylim(2)+offset));
+		% }}}
+	else
+		disp('   LIMA with reduced tiff'),
+		% find merged mosaic landsat image {{{
+		limapath = {'/drive/project_inwoo/issm/Data/LIMA/tiff_90pct/00000-20080319-092059124.tif'};
+		pos = zeros(length(limapath),1);
+		for ii = 1:length(limapath)
+			if exist(limapath{ii}), pos(ii) = 1; end
+		end
+		
+		if sum(pos) == 0,
+			fprintf('download website : https://lima.usgs.gov/fullcontinent.php\n');
+			error('Landsat image at Antarctic region should be downloaded at above website');
+		end
+		limapath = limapath{find(pos)};
+		fprintf('   LIMA path is %s\n', limapath);
+		% }}}
+
+		% read image
+		im = imread(limapath);
+
+		% Region of LIMA data set
+		info = gdalinfo(limapath); % get geotiff info
+		xm = info.xmin + info.dx*[0:info.nx-1];
+		ym = info.ymax - info.dy*[0:info.ny-1];
+
+		% find region of model at LIMA
+		offset = 1e+4;
+		posx = find((xm > xlim(1)-offset).* (xm < xlim(2)+offset));
+		posy = find((ym > ylim(1)-offset).* (ym < ylim(2)+offset));
+		% }}}
+	end
+
+	% update region of radaroverlay
+	md.radaroverlay.x = xm(posx);
+	md.radaroverlay.y = ym(posy);
+	md.radaroverlay.pwr = im(posy, posx,:);
+	% }}}
+elseif length(size(md.radaroverlay.pwr)) == 3
+	% it already contains LIMA image.
+elseif md.mesh.epsg == 3431 & (isempty(md.radaroverlay.pwr) | isempty(md.radaroverlay.x) | isempty(md.radaroverlay.y) | length(size(md.radaroverlay.pwr)) < 3), % Greenland region (NOTE(review): EPSG for Greenland polar stereographic is 3413, not 3431 — verify this constant)
+	error('Greenland region is not yet available.');
+else
+	error('Check md.mesh.epsg, available Landsat regeion is at Antarctica (EPSG:3031)');
+end
+
+%Process image from model
+final = double(md.radaroverlay.pwr)/double(max(md.radaroverlay.pwr(:))); %rescale between 0 and 1
+
+%Prepare grid
+if size(md.radaroverlay.x,1)==1 | size(md.radaroverlay.x,2)==1,
+   x_m = md.radaroverlay.x;
+   y_m = md.radaroverlay.y;
+   data_grid=InterpFromMeshToGrid(elements2d,x2d/getfieldvalue(options,'unit',1),y2d/getfieldvalue(options,'unit',1),data,x_m,y_m,NaN);
+   %data_grid=InterpFromMeshToGrid(md.mesh.elements,md.mesh.x/getfieldvalue(options,'unit',1),md.mesh.y/getfieldvalue(options,'unit',1),data,x_m,y_m,NaN);
+else
+   X = md.radaroverlay.x;
+   Y = md.radaroverlay.y;
+   data_grid=InterpFromMeshToMesh2d(elements2d,x2d,y2d,data,X(:),Y(:),'default',NaN); data_grid=reshape(data_grid,size(X));
+   %data_grid=InterpFromMeshToMesh2d(md.mesh.elements,md.mesh.x,md.mesh.y,data,X(:),Y(:),'default',NaN); data_grid=reshape(data_grid,size(X));
+   x_m=X(1,:); y_m=Y(:,1);
+end
+
+data_nan=isnan(data_grid);
+if exist(options,'caxis'),
+   caxis_opt=getfieldvalue(options,'caxis');
+   data_grid(find(data_grid<caxis_opt(1)))=caxis_opt(1);
+   data_grid(find(data_grid>caxis_opt(2)))=caxis_opt(2);
+   data_min=caxis_opt(1);
+   data_max=caxis_opt(2);
+else
+   data_min=min(data_grid(:));
+   data_max=max(data_grid(:));
+end
+colorm = getcolormap(options);
+image_rgb = ind2rgb(uint16((data_grid - data_min)*(length(colorm)/(data_max-data_min))),colorm);
+
+alpha=ones(size(data_grid));
+alpha(find(~data_nan))=transparency;
+alpha=repmat(alpha,[1 1 3]);
+
+final=alpha.*final+(1-alpha).*image_rgb;
+
+%Select plot area 
+subplotmodel(plotlines,plotcols,i,options);
+
+h=imagesc(x_m*getfieldvalue(options,'unit',1),y_m*getfieldvalue(options,'unit',1),final);
+
+%last step: mesh gridded?
+if exist(options,'edgecolor'),
+	A=elements(:,1); B=elements(:,2); C=elements(:,3); 
+	patch('Faces',[A B C],'Vertices', [x y z],'FaceVertexCData',data_grid(1)*ones(size(x)),'FaceColor','none','EdgeColor',getfieldvalue(options,'edgecolor'));
+end
+
+%Apply options
+if ~isnan(data_min),
+	options=changefieldvalue(options,'caxis',[data_min data_max]); % force caxis so that the colorbar is ready
+end
+options=addfielddefault(options,'axis','xy equal off'); % default axis
+applyoptions(md,data,options);
+end
Index: /issm/trunk/src/m/plot/plot_manager.m
===================================================================
--- /issm/trunk/src/m/plot/plot_manager.m	(revision 24685)
+++ /issm/trunk/src/m/plot/plot_manager.m	(revision 24686)
@@ -166,4 +166,10 @@
 
 %Figure out if this is a semi-transparent plot.
+if getfieldvalue(options,'landsat',0),
+	plot_landsat(md,data,options,nlines,ncols,i);
+	return;
+end
+
+%Figure out if this is a semi-transparent plot.
 if exist(options,'gridded'),
 	plot_gridded(md,data,options,nlines,ncols,i);
Index: /issm/trunk/src/m/plot/plot_scatter.m
===================================================================
--- /issm/trunk/src/m/plot/plot_scatter.m	(revision 24685)
+++ /issm/trunk/src/m/plot/plot_scatter.m	(revision 24686)
@@ -62,5 +62,5 @@
 		ylim = h.Limits;
 	end
-	palette=colormap;
+	palette=colormap();
 	numcolors=size(palette,1);
 	levels=round_ice(linspace(ylim(1),ylim(2),numcolors+1),2);
Index: /issm/trunk/src/m/plot/plot_unit.m
===================================================================
--- /issm/trunk/src/m/plot/plot_unit.m	(revision 24685)
+++ /issm/trunk/src/m/plot/plot_unit.m	(revision 24686)
@@ -76,4 +76,8 @@
 		end
 
+	%edge data
+	case 6
+		A=elements(:,1); B=elements(:,2); C=elements(:,3); 
+		patch('Faces', [A B C],'Vertices', [x y z],'FaceVertexCData', data(:),'FaceColor','interp','EdgeColor',edgecolor);
 	otherwise,
 		error(['case ' num2str(datatype) ' not supported']);
Index: /issm/trunk/src/m/plot/plot_unit.py
===================================================================
--- /issm/trunk/src/m/plot/plot_unit.py	(revision 24685)
+++ /issm/trunk/src/m/plot/plot_unit.py	(revision 24686)
@@ -68,13 +68,22 @@
     # }}}
     # {{{ Get the colormap limits
+    dataspread = np.nanmax(data) - np.nanmin(data)
+    if dataspread != 0.:
+        limextent = np.abs(dataspread / np.nanmean(data))
+    else:
+        limextent = 0.
+
     if options.exist('clim'):
-        lims = options.getfieldvalue('clim', [np.amin(data), np.amax(data)])
+        lims = options.getfieldvalue('clim', [np.nanmin(data), np.nanmax(data)])
     elif options.exist('caxis'):
-        lims = options.getfieldvalue('caxis', [np.amin(data), np.amax(data)])
-    else:
-        if np.amin(data) == np.amax(data):
-            lims = [np.amin(data) * 0.9, np.amax(data) * 1.1]
-        else:
-            lims = [np.amin(data), np.amax(data)]
+        lims = options.getfieldvalue('caxis', [np.nanmin(data), np.nanmax(data)])
+    else:
+        if limextent == 0.:
+            delta = abs(0.1 * np.nanmin(data))
+            lims = [np.nanmin(data) - delta, np.nanmax(data) + delta]
+        elif limextent < 1.0e-12:
+            lims = [np.nanmin(data) - 2 * dataspread, np.nanmax(data) + 2 * dataspread]
+        else:
+            lims = [np.nanmin(data), np.nanmax(data)]
     # }}}
     # {{{ Set the spread of the colormap (default is normal
@@ -103,6 +112,6 @@
             #first deal with colormap
             loccmap = plt.cm.ScalarMappable(cmap=cmap)
-            loccmap.set_array([np.nanmin(data), np.nanmax(data)])
-            loccmap.set_clim(vmin=np.nanmin(data), vmax=np.nanmax(data))
+            loccmap.set_array(lims)
+            loccmap.set_clim(vmin=lims[0], vmax=lims[1])
 
     #dealing with prism sides
@@ -173,6 +182,6 @@
             #first deal with the colormap
             loccmap = plt.cm.ScalarMappable(cmap=cmap)
-            loccmap.set_array([np.nanmin(data), np.nanmax(data)])
-            loccmap.set_clim(vmin=np.nanmin(data), vmax=np.nanmax(data))
+            loccmap.set_array(lims)
+            loccmap.set_clim(vmin=lims[0], vmax=lims[1])
 
     #deal with prism sides
Index: /issm/trunk/src/m/qmu/gaussian_pdf.m
===================================================================
--- /issm/trunk/src/m/qmu/gaussian_pdf.m	(revision 24686)
+++ /issm/trunk/src/m/qmu/gaussian_pdf.m	(revision 24686)
@@ -0,0 +1,3 @@
+function pdf=gaussian_pdf(x,average,sigma)
+
+pdf=1/(sqrt(2*pi*sigma^2))*exp(-(x-average).^2/(2*sigma^2));
Index: /issm/trunk/src/m/qmu/lognormal_pdf.m
===================================================================
--- /issm/trunk/src/m/qmu/lognormal_pdf.m	(revision 24686)
+++ /issm/trunk/src/m/qmu/lognormal_pdf.m	(revision 24686)
@@ -0,0 +1,4 @@
+function pdf=lognormal_pdf(x,mu,sigma)
+
+pdf=1./(x*sigma*sqrt(2*pi)) .* exp( -(log(x)-mu).^2/(2*sigma^2)  );
+
Index: /issm/trunk/src/m/qmu/postqmu.m
===================================================================
--- /issm/trunk/src/m/qmu/postqmu.m	(revision 24685)
+++ /issm/trunk/src/m/qmu/postqmu.m	(revision 24686)
@@ -1,4 +1,4 @@
 function md=postqmu(md)
-%INPUT function md=postqmu(md,qmufile,qmudir)
+%INPUT function md=postqmu(md)
 %Deal with dakota output results in files.
 
@@ -22,10 +22,4 @@
 qmuinfile=[md.miscellaneous.name '.qmu.in'];
 qmuoutfile=[md.miscellaneous.name '.qmu.out'];
-
-%[method,dvar,dresp_in]=dakota_in_parse(qmuinfile);
-%dakotaresults.method   =method;
-%dakotaresults.dvar     =dvar;
-%dakotaresults.dresp_in =dresp_in;
-
 [method,dresp_out,scm,pcm,srcm,prcm]=dakota_out_parse(qmuoutfile);
 dakotaresults.dresp_out=dresp_out;
@@ -40,4 +34,16 @@
 end
 
+if md.qmu.output,
+	if strcmpi(md.qmu.method.method,'nond_sampling'),
+		dakotaresults.modelresults={};
+		md2=md; md2.qmu.isdakota=0;
+		for i=1:md2.qmu.method.params.samples,
+			md2=loadresultsfromdisk(md2,[md2.miscellaneous.name '.outbin.' num2str(i)]);
+			dakotaresults.modelresults{end+1}=md2.results;
+		end
+	end
+end
+
+
 %put dakotaresults in their right location.
 md.results.dakota=dakotaresults;
Index: /issm/trunk/src/m/qmu/postqmu.py
===================================================================
--- /issm/trunk/src/m/qmu/postqmu.py	(revision 24685)
+++ /issm/trunk/src/m/qmu/postqmu.py	(revision 24686)
@@ -6,12 +6,11 @@
 
 
-def postqmu(md, qmufile, qmudir='qmu' + str(getpid())):
+def postqmu(md):
     '''
     Deal with dakota output results in files.
 
     INPUT function
-    md = postqmu(md, qmufile, qmudir)
+    md = postqmu(md)
 
-    By default: qmudir = 'qmu' + pid (eg. 'qmu2189')
     '''
 
@@ -24,16 +23,9 @@
             print(fline)
 
-        raise RuntimeError('Dakota returned error in ' + str(qmuerrfile) + ' file.  ' + str(qmudir) + ' directory retained.')
+        raise RuntimeError('Dakota returned error in ' + str(qmuerrfile) + ' file.')
 
     # parse inputs and results from dakota
     qmuinfile = str(md.miscellaneous.name) + '.qmu.in'
     qmuoutfile = str(md.miscellaneous.name) + '.qmu.out'
-
-    # unused and unimplemented
-    #[method, dvar, dresp_in] = dakota_in_parse(qmuinfile)
-    #dakotaresults.method   =method
-    #dakotaresults.dvar     =dvar
-    #dakotaresults.dresp_in =dresp_in
-
     [method, dresp_out, scm, pcm, srcm, prcm] = dakota_out_parse(qmuoutfile)
     dakotaresults = struct()
Index: /issm/trunk/src/m/qmu/preqmu.m
===================================================================
--- /issm/trunk/src/m/qmu/preqmu.m	(revision 24685)
+++ /issm/trunk/src/m/qmu/preqmu.m	(revision 24686)
@@ -6,5 +6,4 @@
 %   options come from the solve.m routine. They can include Dakota options:
 %
-%       qmudir:  any directory where to run the qmu analysis
 %       qmufile: input file for Dakota
 %       ivar: selection number for variables input (if several are specified in variables)
@@ -12,10 +11,7 @@
 %       imethod: same thing for methods
 %       iparams: same thing for params
-%       overwrite: overwrite qmudir before analysis
 
 disp('preprocessing dakota inputs');
-qmudir    = getfieldvalue(options,'qmudir',['qmu' num2str(feature('GetPid'))]);  % qmudir = ['qmu_' datestr(now,'yyyymmdd_HHMMSS')];
-qmufile   = getfieldvalue(options,'qmufile','qmu');% qmufile cannot be changed unless ????script.sh is also changed
-overwrite = getfieldvalue(options,'overwrite','n');
+qmufile   = getfieldvalue(options,'qmufile','qmu');
 ivar      = getfieldvalue(options,'ivar',1);
 iresp     = getfieldvalue(options,'iresp',1);
@@ -23,18 +19,6 @@
 iparams   = getfieldvalue(options,'iparams',1);
 
-%first create temporary directory in which we will work
-if strncmpi(overwrite,'y',1)
-	system(['rm -rf ' qmudir '/*']); 
-else
-	%does the directory exist? if so, then error out
-	if exist(qmudir)==7,
-		error('Existing ''%s'' directory, cannot overwrite. Specify ''overwrite'',''y'' option in solve arguments.',options.qmudir);
-	end
-end
-mkdir(qmudir)
-cd(qmudir)
-
 %when running in library mode, the in file needs to be called md.miscellaneous.name.qmu.in
-qmufile=[md.miscellaneous.name ];
+qmufile=[md.miscellaneous.name];
 
 %retrieve variables and resposnes for this particular analysis.
@@ -46,5 +30,5 @@
 responses=expandresponses(md,responses);
 
-%go through variables and responses, and check they don't have more than md.qmu.numberofpartitions values. Also determine numvariables and numresponses{{{
+%go through variables and responses, and check they don't have more than md.qmu.numberofpartitions values. Also determine numvariables and numresponses
 numvariables=0;
 variable_fieldnames=fieldnames(variables);
@@ -72,11 +56,9 @@
 	numresponses=numresponses+numel(responses.(field_name));
 end
-%}}}}
 
 %create in file for dakota
 dakota_in_data(md.qmu.method(imethod),variables,responses,md.qmu.params(iparams),qmufile);
-system(['rm -rf ' md.miscellaneous.name '.m']);
 
-%build a list of variables and responses descriptors. the list is not expanded. {{{
+%build a list of variables and responses descriptors. the list is not expanded.
 variabledescriptors={};
 variable_fieldnames=fieldnames(md.qmu.variables(ivar));
@@ -98,5 +80,4 @@
 	end
 end
-%}}}
 
 %register the fields that will be needed by the Qmu model.
Index: /issm/trunk/src/m/qmu/preqmu.py
===================================================================
--- /issm/trunk/src/m/qmu/preqmu.py	(revision 24685)
+++ /issm/trunk/src/m/qmu/preqmu.py	(revision 24686)
@@ -15,5 +15,4 @@
    options come from the solve.py routine. They can include Dakota options:
 
-    qmudir:  any directory where to run the qmu analysis
     qmufile: input file for Dakota
 
@@ -23,31 +22,13 @@
     imethod: same thing for methods
     iparams: same thing for params
-
-    overwrite: overwrite qmudir before analysis
 '''
 
     print('preprocessing dakota inputs')
-    qmudir = options.getfieldvalue('qmudir', 'qmu' + str(os.getpid()))
-    # qmudir = ['qmu_' datestr(now, 'yyyymmdd_HHMMSS')]
     qmufile = options.getfieldvalue('qmufile', 'qmu')
     # qmufile cannot be changed unless ????script.sh is also changed
-    overwrite = options.getfieldvalue('overwrite', 'n')
     options.addfielddefault('ivar', 0)
     options.addfielddefault('iresp', 0)
     options.addfielddefault('imethod', 0)
     options.addfielddefault('iparams', 0)
-
-    # first create temporary directory in which we will work
-    if strncmpi(overwrite, 'y', 1):
-        os.system('rm -rf ' + qmudir + '/* ')
-    else:
-        # does the directory exist? if so, then error out
-        if os.path.isdir(qmudir):
-            raise RuntimeError('Existing ' + str(options.qmudir) + ' directory, cannot overwrite. Specify "overwrite", "y" option in solve arguments.')
-
-    # os.makedirs() raises error when dir exists, matlab's mkdir() does not
-    if not os.path.isdir(qmudir):
-        os.makedirs(qmudir)
-    os.chdir(qmudir)
 
     # when running in library mode, the in file needs to be called md.miscellaneous.name.qmu.in
@@ -98,9 +79,4 @@
     dakota_in_data(md.qmu.method, variables, responses, md.qmu.params, qmufile)
 
-    #====================================================================================  #
-    #REMOVED FOR DEBUGGING ONLY:
-    #os.system('rm -rf ' + str(md.miscellaneous.name) + '.py')
-    #====================================================================================  #
-
     # build a list of variables and responses descriptors. the list is not expanded.
     #{{{
Index: sm/trunk/src/m/qmu/process_qmu_options.m
===================================================================
--- /issm/trunk/src/m/qmu/process_qmu_options.m	(revision 24685)
+++ 	(revision )
@@ -1,100 +1,0 @@
-function outoptions=process_qmu_options(options)
-%PROCESS_QMU_OPTIONS - set up default options for qmu phase
-%
-%   Usage:
-%      options=process_qmu_options(options)
-%
-%   See also: QMU,RECOVER_QMU_OPTIONS
-
-%analysis_type: check on this option, error out otherwise
-found=0;
-for i=1:size(options,1),
-	if strcmpi(options{i,1},'analysis_type'),
-		analysis_type=options{i,2};
-		found=1;
-	end
-end
-if ~found,
-	error('recover_qmu_options error message: no ''analysis_type'' was provided');
-end
-
-%package: is there one? default to ''JPL''
-found=0;
-for i=1:size(options,1),
-	if strcmpi(options{i,1},'package'),
-		package=options{i,2};
-		found=1;
-	end
-end
-if ~found,
-	disp('recover_qmu_options info message: no ''package'' was provided, defaulting to ''JPL''');
-	options(end+1,:)={'package' 'JPL'};
-	package='JPL';
-end
-
-if ~ischar(package), 
-	error(['process_qmu_options error message: package ' package ' not supported yet']);
-end
-
-%check solution type is supported
-if ~(strcmpi(analysis_type,'control') |  ...
-		strcmpi(analysis_type,'stressbalance') |  ...
-		strcmpi(analysis_type,'masstransport') |  ...
-		strcmpi(analysis_type,'thermal') |  ...
-		strcmpi(analysis_type,'parameters') |  ...
-		strcmpi(analysis_type,'transient') ),
-	error(['process_qmu_options error message: analysis_type ' analysis_type ' not supported yet!']);
-end
-
-%  process qmu arguments
-
-%first, the defaults
-qmudir ='qmu';% qmudir =['qmu_' datestr(now,'yyyymmdd_HHMMSS')];
-qmufile='qmu';
-ivar   =1;
-iresp  =1;
-imethod=1;
-iparams=1;
-runmpi =false;
-
-for i=1:size(options,1),
-	switch options{i,1},
-	case 'qmudir'
-		qmudir=options{i,2};
-	case 'qmufile'
-		qmufile=options{i,2};
-	case 'ivar'
-		ivar=options{i,2};
-	case 'iresp'
-		iresp=options{i,2};
-	case 'imethod'
-		imethod=options{i,2};
-	case 'iparams'
-		iparams=options{i,2};
-	case 'overwrite'
-		outoptions.overwrite=options{i,2};
-	case 'keep'
-		outoptions.keep=options{i,2};
-	case 'outfiles'
-		outoptions.outfiles=options{i,2};
-	case 'rstfile'
-		outoptions.rstfile=options{i,2}; 
-	case 'rundakota'
-		outoptions.rundakota=options{i,2};
-	case 'runmpi'
-		runmpi=options{i,2};
-	otherwise
-		%nothing
-	end
-end
-
-%setup final options structure
-outoptions.analysis_type=analysis_type;
-outoptions.package=package;
-outoptions.qmudir=qmudir;
-outoptions.qmufile=qmufile;
-outoptions.ivar=ivar;
-outoptions.iresp=iresp;
-outoptions.imethod=imethod;
-outoptions.iparams=iparams;
-outoptions.runmpi=runmpi;
Index: /issm/trunk/src/m/qmu/vlist_write.m
===================================================================
--- /issm/trunk/src/m/qmu/vlist_write.m	(revision 24685)
+++ /issm/trunk/src/m/qmu/vlist_write.m	(revision 24686)
@@ -19,4 +19,7 @@
 pstype =[];
 pscale =[];
+pabscissas =[];
+ppairs_per_variable =[];
+pcounts=[];
 pdesc  =[];
 
@@ -32,4 +35,7 @@
     pstype =[pstype  prop_stype(dvar.(fnames{i})) ];
     pscale =[pscale  prop_scale(dvar.(fnames{i})) ];
+    ppairs_per_variable =[ppairs_per_variable  prop_pairs_per_variable(dvar.(fnames{i})) ];
+    pabscissas =[pabscissas  prop_abscissas(dvar.(fnames{i})) ];
+    pcounts =[pcounts  prop_counts(dvar.(fnames{i})) ];
     pdesc  =[pdesc   prop_desc(dvar.(fnames{i}),fnames{i})];
 end
@@ -73,6 +79,21 @@
     vector_write(fidi,sprintf('\t    '),pscale ,6,76);
 end
+if ~isempty(ppairs_per_variable)
+    %fprintf(fidi,'\t  %s_pairs_per_variable =\n',cstring2);
+    fprintf(fidi,'\t  pairs_per_variable =\n');
+    vector_write(fidi,sprintf('\t    '),ppairs_per_variable ,6,76);
+end
+if ~isempty(pabscissas)
+    %fprintf(fidi,'\t  %s_abscissas =\n',cstring2);
+    fprintf(fidi,'\t  abscissas =\n');
+    vector_write(fidi,sprintf('\t    '),pabscissas ,6,76);
+end
+if ~isempty(pcounts)
+    %fprintf(fidi,'\t  %s_counts =\n',cstring2);
+    fprintf(fidi,'\t  counts =\n');
+    vector_write(fidi,sprintf('\t    '),pcounts ,6,76);
+end
 if ~isempty(pdesc)
-    fprintf(fidi,'\t  %s_descriptors =\n',cstring2);
+    fprintf(fidi,'\t  descriptors =\n');
     vector_write(fidi,sprintf('\t    '),pdesc  ,6,76);
 end
Index: /issm/trunk/src/m/shp/shpconcat.m
===================================================================
--- /issm/trunk/src/m/shp/shpconcat.m	(revision 24686)
+++ /issm/trunk/src/m/shp/shpconcat.m	(revision 24686)
@@ -0,0 +1,20 @@
+function shpconcat(domain,holes,filename)
+%SHPWRITE - concat a shape file from a domain and holes
+%
+%   Usage:
+%      shpconcat(domain,holes,filename)
+%
+%   Example:
+%      shpconcat(domain,holes,'domainoutline.shp')
+%
+%   See also SHPREAD,SHPWRITE
+
+
+	merged=domain;
+	merged.Geometry='Polygon';
+
+	for i=1:length(holes),
+		merged(end+1)=struct('x',holes(i).x,'y',holes(i).y,'Geometry','Polygon');
+	end
+
+	shpwrite(merged,filename);
Index: /issm/trunk/src/m/shp/shpdisp3d.m
===================================================================
--- /issm/trunk/src/m/shp/shpdisp3d.m	(revision 24686)
+++ /issm/trunk/src/m/shp/shpdisp3d.m	(revision 24686)
@@ -0,0 +1,52 @@
+function shpdisp3d(domainoutline,varargin)
+%SHPDISP - plot the contours of a domain outline file on a globe in 3d
+%
+%   This routine reads in a domain outline file (Shape format) and plots all the contours on a 3D rendition of the earth.
+%
+%   Usage:
+%      shpdisp3d(domainoutline,varargin)
+%      shpdisp3d(filenamei,'figure',1,'style',stylei,'linewidth',linewidthi);
+%
+%   Example:
+%      shpdisp3d('Domain.shp','figure',1,'style','--r','linewidthi',2);
+%
+%   See also SHPREAD, SHPDOC, SHPDISP
+
+%recover options
+options=pairoptions(varargin{:});
+
+%parse input:
+figurenumber=getfieldvalue(options,'figure',1);
+color=getfieldvalue(options,'color','r');
+linewidth=getfieldvalue(options,'linewidth',1);
+unitmultiplier=getfieldvalue(options,'unit',1);
+epsg=getfieldvalue(options,'epsg',4326);
+radius=getfieldvalue(options,'radius',6371012);
+aboveground=getfieldvalue(options,'aboveground',1000)
+
+%read domain:
+domain=shpread(domainoutline);
+
+if epsg~=4326,
+	%transform to lat,long:
+	for i=1:length(domain),
+		[x,y] = gdaltransform(domain(i).x,domain(i).y,'EPSG:4326',sprintf('EPSG:%i',epsg));
+		domain(i).x=x; domain(i).y=y;
+	end
+end
+
+for i=1:length(domain),
+
+	%make sure lat,long are what they are supposed to be: 
+	if any(domain(i).x>90 | domain(i).x<-90), 
+		long=domain(i).x; lat=domain(i).y;
+	else
+		long=domain(i).y; lat=domain(i).x;
+	end
+
+	%project on x,y,z reference frame.
+	[x,y,z]=AboveGround(lat,long,radius,aboveground);
+	hold on, p=plot3(x,y,z,'k-'); 
+	set(p,'Color',color);
+	set(p,'LineWidth',linewidth);
+end
Index: /issm/trunk/src/m/solve/loadresultsfromcluster.m
===================================================================
--- /issm/trunk/src/m/solve/loadresultsfromcluster.m	(revision 24685)
+++ /issm/trunk/src/m/solve/loadresultsfromcluster.m	(revision 24686)
@@ -30,4 +30,11 @@
 		end
 	end
+	if md.qmu.output,
+		if strcmpi(md.qmu.method.method,'nond_sampling'),
+			for i=1:md.qmu.method.params.samples
+				filelist{end+1}=[md.miscellaneous.name '.outbin.' num2str(i)];
+			end
+		end
+	end
 else
 	filelist{end+1}=[md.miscellaneous.name '.outbin'];
@@ -39,16 +46,12 @@
 
 %erase the log and output files
-if md.qmu.isdakota,
-	delete([['qmu' num2str(feature('GetPid')) '/'] md.miscellaneous.name '.outlog']);
-	delete([['qmu' num2str(feature('GetPid')) '/']  md.miscellaneous.name '.errlog']);
-else
-	if ~nolog,
-		delete([md.miscellaneous.name '.outlog']);
-		delete([md.miscellaneous.name '.errlog']);
+for i=1:numel(filelist)
+	filename = filelist{i};
+	if exist(filename)
+		delete(filename)
 	end
-	delete([md.miscellaneous.name '.outbin']);
-	if exist([md.private.runtimename '.tar.gz']) & ~ispc(),
-		delete([md.private.runtimename '.tar.gz']);
-	end
+end
+if exist([md.private.runtimename '.tar.gz']) & ~ispc(),
+	delete([md.private.runtimename '.tar.gz']);
 end
 
@@ -56,15 +59,13 @@
 hostname=oshostname();
 if strcmpi(hostname,cluster.name),
-	if md.qmu.isdakota,
-		delete([['qmu' num2str(feature('GetPid')) '/'] md.miscellaneous.name '.bin']);
-		delete([['qmu' num2str(feature('GetPid')) '/'] md.miscellaneous.name '.queue']);
+	delete([md.miscellaneous.name '.bin']);
+	delete([md.miscellaneous.name '.toolkits']);
+	if md.qmu.isdakota
+		delete([md.miscellaneous.name '.qmu.in']);
+	end
+	if ~ispc(),
+		delete([md.miscellaneous.name '.queue']);
 	else
-		delete([md.miscellaneous.name '.bin']);
-		delete([md.miscellaneous.name '.toolkits']);
-		if ~ispc(),
-			delete([md.miscellaneous.name '.queue']);
-		else
-			delete([md.miscellaneous.name '.bat']);
-		end
+		delete([md.miscellaneous.name '.bat']);
 	end
 end
Index: /issm/trunk/src/m/solve/loadresultsfromcluster.py
===================================================================
--- /issm/trunk/src/m/solve/loadresultsfromcluster.py	(revision 24685)
+++ /issm/trunk/src/m/solve/loadresultsfromcluster.py	(revision 24686)
@@ -77,11 +77,5 @@
         TryRem('.bin', filename)
 
-    #cwd = os.getcwd().split('/')[-1]
-    if md.qmu.isdakota:
-        os.chdir('..')
-    #TryRem('', cwd)
-
     return md
-
 
 def TryRem(extension, filename):
Index: /issm/trunk/src/m/solve/loadresultsfromdisk.m
===================================================================
--- /issm/trunk/src/m/solve/loadresultsfromdisk.m	(revision 24685)
+++ /issm/trunk/src/m/solve/loadresultsfromdisk.m	(revision 24686)
@@ -60,4 +60,3 @@
 else
 	md=postqmu(md);
-	cd ..
 end
Index: /issm/trunk/src/m/solve/loadresultsfromdisk.py
===================================================================
--- /issm/trunk/src/m/solve/loadresultsfromdisk.py	(revision 24685)
+++ /issm/trunk/src/m/solve/loadresultsfromdisk.py	(revision 24686)
@@ -58,5 +58,5 @@
     #post processes qmu results if necessary
     else:
-        md = postqmu(md, filename)
+        md = postqmu(md)
 
     return md
Index: /issm/trunk/src/m/solve/solve.m
===================================================================
--- /issm/trunk/src/m/solve/solve.m	(revision 24685)
+++ /issm/trunk/src/m/solve/solve.m	(revision 24686)
@@ -107,6 +107,5 @@
 end
 
-%if running qmu analysis, some preprocessing of dakota files using models
-%fields needs to be carried out. 
+%if running qmu analysis, some preprocessing of dakota files using models fields needs to be carried out. 
 if md.qmu.isdakota,
 	md=preqmu(md,options);
@@ -172,9 +171,2 @@
 	 disp('Model results must be loaded manually with md=loadresultsfromcluster(md);');
 end
-
-%post processes qmu results if necessary
-if md.qmu.isdakota,
-	if ~strncmpi(getfieldvalue(options,'keep','y'),'y',1)
-		system(['rm -rf qmu' num2str(feature('GetPid'))]);
-	end
-end
Index: /issm/trunk/src/m/solve/solve.py
===================================================================
--- /issm/trunk/src/m/solve/solve.py	(revision 24685)
+++ /issm/trunk/src/m/solve/solve.py	(revision 24686)
@@ -153,8 +153,3 @@
             md = loadresultsfromcluster(md)
 
-    #post processes qmu results if necessary
-    if md.qmu.isdakota:
-        if not strncmpi(options.getfieldvalue('keep', 'y'), 'y', 1):
-            shutil.rmtree('qmu' + str(os.getpid()))
-
     return md
Index: /issm/trunk/src/m/solve/solveiceocean.m
===================================================================
--- /issm/trunk/src/m/solve/solveiceocean.m	(revision 24685)
+++ /issm/trunk/src/m/solve/solveiceocean.m	(revision 24686)
@@ -135,9 +135,2 @@
 	 disp('Model results must be loaded manually with md=loadresultsfromcluster(md);');
 end
-
-%post processes qmu results if necessary
-if md.qmu.isdakota,
-	if ~strncmpi(getfieldvalue(options,'keep','y'),'y',1)
-		system(['rm -rf qmu' num2str(feature('GetPid'))]);
-	end
-end
Index: /issm/trunk/src/m/solvers/bcgslbjacobioptions.m
===================================================================
--- /issm/trunk/src/m/solvers/bcgslbjacobioptions.m	(revision 24685)
+++ /issm/trunk/src/m/solvers/bcgslbjacobioptions.m	(revision 24686)
@@ -8,2 +8,4 @@
 solverOptions.ksp_type=getfieldvalue(options, 'ksp_type','bcgsl');
 solverOptions.pc_type=getfieldvalue(options, 'pc_type',  'bjacobi');
+solverOptions.ksp_max_it=getfieldvalue(options,'ksp_max_it',100);
+solverOptions.ksp_rtol=getfieldvalue(options,'ksp_rtol',1e-15);
Index: /issm/trunk/src/wrappers/MeshPartition/MeshPartition.cpp
===================================================================
--- /issm/trunk/src/wrappers/MeshPartition/MeshPartition.cpp	(revision 24685)
+++ /issm/trunk/src/wrappers/MeshPartition/MeshPartition.cpp	(revision 24686)
@@ -54,5 +54,5 @@
 	/*Run partitioning algorithm based on a "clever" use of the Metis partitioner: */
 	MeshPartitionx(&int_element_partitioning,&int_node_partitioning,numberofelements,numberofvertices,elements,
-		numberofelements2d,numberofvertices2d,elements2d,numberoflayers,elements_width,meshelementtype,numareas);
+		numberofelements2d,numberofvertices2d,elements2d,NULL,numberoflayers,elements_width,meshelementtype,numareas);
 
 	/*Post process node_partitioning and element_partitioning to be in double format. Metis needed them in int* format: */
Index: /issm/trunk/src/wrappers/matlab/Makefile.am
===================================================================
--- /issm/trunk/src/wrappers/matlab/Makefile.am	(revision 24685)
+++ /issm/trunk/src/wrappers/matlab/Makefile.am	(revision 24686)
@@ -5,5 +5,5 @@
 
 #define prefix (from http://www.gnu.org/software/autoconf/manual/autoconf-2.67/html_node/Defining-Directories.html)
-AM_CPPFLAGS+=  -DISSM_PREFIX='"$(prefix)"'
+AM_CPPFLAGS += -DISSM_PREFIX='"$(prefix)"'
 
 #matlab io{{{
@@ -12,12 +12,13 @@
 else
 noinst_LTLIBRARIES = libISSMMatlab.la
-lib_LTLIBRARIES = 
-endif
-
-io_sources= ./io/CheckNumMatlabArguments.cpp\
-				./io/WriteMatlabData.cpp\
-				./io/FetchMatlabData.cpp
-
-ALLCXXFLAGS= -fPIC -D_WRAPPERS_ $(CXXFLAGS) $(CXXOPTFLAGS) 
+lib_LTLIBRARIES =
+endif
+
+io_sources = \
+	./io/CheckNumMatlabArguments.cpp \
+	./io/FetchMatlabData.cpp \
+	./io/WriteMatlabData.cpp
+
+ALLCXXFLAGS= -fPIC -D_WRAPPERS_ $(CXXFLAGS) $(CXXOPTFLAGS)
 
 libISSMMatlab_la_SOURCES = $(io_sources)
@@ -37,28 +38,29 @@
 #}}}
 #Wrappers {{{
-lib_LTLIBRARIES += BamgMesher_matlab.la\
-						 BamgConvertMesh_matlab.la\
-						 BamgTriangulate_matlab.la\
-						 ContourToMesh_matlab.la\
-						 ContourToNodes_matlab.la\
-						 DistanceToMaskBoundary_matlab.la\
-						 ElementConnectivity_matlab.la\
-						 ExpSimplify_matlab.la\
-						 ExpToLevelSet_matlab.la\
-						 InterpFromGridToMesh_matlab.la\
-						 InterpFromMeshToMesh2d_matlab.la\
-						 InterpFromMeshToMesh3d_matlab.la\
-						 InterpFromMeshToGrid_matlab.la\
-						 InterpFromMesh2d_matlab.la\
-						 IssmConfig_matlab.la\
-						 NodeConnectivity_matlab.la\
-						 M1qn3_matlab.la\
-						 MeshPartition_matlab.la\
-						 MeshProfileIntersection_matlab.la\
-						 PointCloudFindNeighbors_matlab.la\
-						 PropagateFlagsFromConnectivity_matlab.la\
-						 Triangle_matlab.la\
-						 ProcessRifts_matlab.la\
-						 Scotch_matlab.la
+lib_LTLIBRARIES += \
+	BamgMesher_matlab.la \
+	BamgConvertMesh_matlab.la \
+	BamgTriangulate_matlab.la \
+	ContourToMesh_matlab.la \
+	ContourToNodes_matlab.la \
+	DistanceToMaskBoundary_matlab.la \
+	ElementConnectivity_matlab.la \
+	ExpSimplify_matlab.la \
+	ExpToLevelSet_matlab.la \
+	InterpFromGridToMesh_matlab.la \
+	InterpFromMeshToMesh2d_matlab.la \
+	InterpFromMeshToMesh3d_matlab.la \
+	InterpFromMeshToGrid_matlab.la \
+	InterpFromMesh2d_matlab.la \
+	IssmConfig_matlab.la \
+	NodeConnectivity_matlab.la \
+	M1qn3_matlab.la \
+	MeshPartition_matlab.la \
+	MeshProfileIntersection_matlab.la \
+	PointCloudFindNeighbors_matlab.la \
+	PropagateFlagsFromConnectivity_matlab.la \
+	Triangle_matlab.la \
+	ProcessRifts_matlab.la \
+	Scotch_matlab.la
 
 if CHACO
@@ -66,5 +68,5 @@
 endif
 if KRIGING
-lib_LTLIBRARIES +=  Kriging_matlab.la
+lib_LTLIBRARIES += Kriging_matlab.la
 endif
 if KML
@@ -74,5 +76,5 @@
 
 # Dependencies {{{
-deps =  $(MATHLIB)
+deps = $(MATHLIB)
 
 #Triangle library
@@ -81,15 +83,25 @@
 #Matlab part
 AM_LDFLAGS = -module $(MEXLINK) -shrext ${EXEEXT} --export-dynamic -rdynamic -no-undefined
-if VERSION
-AM_LDFLAGS +=
+
+if !VERSION
+AM_LDFLAGS += -avoid-version
+endif
+
+# NOTE:
+# - On Linux, We probably do not need the -static flag as long as we only
+#	generate static libraries for external packages. Dynamic system libraries
+#	will be linked to, whether we like it or not, if no static version is
+#	available.
+# - On macOS, static linking of binaries is not supported.
+#
+if STANDALONE_MODULES
+if MAC
+AM_LDFLAGS += -Wl,-rpath,'@rpath'
 else
-AM_LDFLAGS += -avoid-version
-endif
-
-if STANDALONE_MODULES
-AM_LDFLAGS += -Wl,-static
-endif
-
-AM_CXXFLAGS +=  -D_HAVE_MATLAB_MODULES_ -fPIC
+AM_LDFLAGS += -Wl,-static -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN'
+endif
+endif
+
+AM_CXXFLAGS += -D_HAVE_MATLAB_MODULES_ -fPIC
 
 deps += ./libISSMMatlab.la ../../c/libISSMModules.la ../../c/libISSMCore.la ./libISSMApi.la
@@ -111,81 +123,82 @@
 
 #Optimization flags:
-AM_CXXFLAGS += $(CXXOPTFLAGS) 
+AM_CXXFLAGS += $(CXXOPTFLAGS)
 #}}}
 # Module sources and dependencies {{{
 if !WINDOWS
-libISSMMatlab_la_LIBADD = ./../../c/libISSMCore.la ./../../c/libISSMModules.la $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(MKLLIB) $(GSLLIB) $(PROJ4LIB) $(MATHLIB) $(MEXLIB) 
+libISSMMatlab_la_LIBADD = ./../../c/libISSMCore.la ./../../c/libISSMModules.la $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(MKLLIB) $(GSLLIB) $(PROJ4LIB) $(MATHLIB) $(MEXLIB)
 endif
 
 if STANDALONE_LIBRARIES
-libISSMMatlab_la_LDFLAGS = -static 
-deps += $(PETSCLIB) $(TAOLIB) $(NEOPZLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MUMPSLIB) $(SUPERLULIB) $(SPOOLESLIB) $(TRIANGLELIB) $(SCALAPACKLIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(DAKOTALIB) $(METISLIB) $(CHACOLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(OSLIBS) $(GSLLIB)   $(ADOLCLIB) $(AMPILIB) $(METEOIOLIB) $(SNOWPACKLIB)
-endif
-
-if !WINDOWS
-libISSMApi_la_LIBADD = $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB) $(MATHLIB) $(MEXLIB)
-endif
-
-if STANDALONE_LIBRARIES
-libISSMApi_la_LDFLAGS = -static 
-endif
+libISSMMatlab_la_LDFLAGS = -static
+libISSMApi_la_LDFLAGS = -static
+deps += $(PETSCLIB) $(TAOLIB) $(NEOPZLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MUMPSLIB) $(SUPERLULIB) $(SPOOLESLIB) $(TRIANGLELIB) $(SCALAPACKLIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(DAKOTALIB) $(METISLIB) $(CHACOLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(OSLIBS) $(GSLLIB) $(ADOLCLIB) $(AMPILIB) $(METEOIOLIB) $(SNOWPACKLIB)
+endif
+
+if !WINDOWS
+libISSMApi_la_LIBADD = $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB) $(MATHLIB) $(MEXLIB)
+endif
+
+BamgConvertMesh_matlab_la_SOURCES = ../BamgConvertMesh/BamgConvertMesh.cpp
+BamgConvertMesh_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
+BamgConvertMesh_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 BamgMesher_matlab_la_SOURCES = ../BamgMesher/BamgMesher.cpp
 BamgMesher_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgMesher_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
-
-BamgConvertMesh_matlab_la_SOURCES = ../BamgConvertMesh/BamgConvertMesh.cpp
-BamgConvertMesh_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgConvertMesh_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+BamgMesher_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 BamgTriangulate_matlab_la_SOURCES = ../BamgTriangulate/BamgTriangulate.cpp
 BamgTriangulate_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgTriangulate_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+BamgTriangulate_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 if CHACO
 Chaco_matlab_la_SOURCES = ../Chaco/Chaco.cpp
 Chaco_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-Chaco_matlab_la_LIBADD = ${deps} $(MPILIB) $(CHACOLIB) $(GSLLIB) $(PROJ4LIB) $(PETSCLIB) $(NEOPZLIB)
+Chaco_matlab_la_LIBADD = ${deps} $(CHACOLIB) $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 endif
 
 ContourToMesh_matlab_la_SOURCES = ../ContourToMesh/ContourToMesh.cpp
 ContourToMesh_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-ContourToMesh_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+ContourToMesh_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+
+ContourToNodes_matlab_la_SOURCES = ../ContourToNodes/ContourToNodes.cpp
+ContourToNodes_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
+ContourToNodes_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+
+DistanceToMaskBoundary_matlab_la_SOURCES = ../DistanceToMaskBoundary/DistanceToMaskBoundary.cpp
+DistanceToMaskBoundary_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
+DistanceToMaskBoundary_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+
+ElementConnectivity_matlab_la_SOURCES = ../ElementConnectivity/ElementConnectivity.cpp
+ElementConnectivity_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
+ElementConnectivity_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 ExpToLevelSet_matlab_la_SOURCES = ../ExpToLevelSet/ExpToLevelSet.cpp
 ExpToLevelSet_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-ExpToLevelSet_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
-
-ContourToNodes_matlab_la_SOURCES = ../ContourToNodes/ContourToNodes.cpp
-ContourToNodes_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-ContourToNodes_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
-
-DistanceToMaskBoundary_matlab_la_SOURCES = ../DistanceToMaskBoundary/DistanceToMaskBoundary.cpp
-DistanceToMaskBoundary_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-DistanceToMaskBoundary_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
-
-ElementConnectivity_matlab_la_SOURCES = ../ElementConnectivity/ElementConnectivity.cpp
-ElementConnectivity_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-ElementConnectivity_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+ExpToLevelSet_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+
+ExpSimplify_matlab_la_SOURCES = ../ExpSimplify/ExpSimplify.cpp
+ExpSimplify_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
+ExpSimplify_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB) $(NEOPZLIB)
 
 InterpFromGridToMesh_matlab_la_SOURCES = ../InterpFromGridToMesh/InterpFromGridToMesh.cpp
 InterpFromGridToMesh_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromGridToMesh_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+InterpFromGridToMesh_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+
+InterpFromMesh2d_matlab_la_SOURCES = ../InterpFromMesh2d/InterpFromMesh2d.cpp
+InterpFromMesh2d_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
+InterpFromMesh2d_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+
+InterpFromMeshToGrid_matlab_la_SOURCES = ../InterpFromMeshToGrid/InterpFromMeshToGrid.cpp
+InterpFromMeshToGrid_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
+InterpFromMeshToGrid_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 InterpFromMeshToMesh2d_matlab_la_SOURCES = ../InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.cpp
 InterpFromMeshToMesh2d_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToMesh2d_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+InterpFromMeshToMesh2d_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 InterpFromMeshToMesh3d_matlab_la_SOURCES = ../InterpFromMeshToMesh3d/InterpFromMeshToMesh3d.cpp
 InterpFromMeshToMesh3d_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToMesh3d_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
-
-InterpFromMeshToGrid_matlab_la_SOURCES = ../InterpFromMeshToGrid/InterpFromMeshToGrid.cpp
-InterpFromMeshToGrid_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToGrid_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
-
-InterpFromMesh2d_matlab_la_SOURCES = ../InterpFromMesh2d/InterpFromMesh2d.cpp
-InterpFromMesh2d_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMesh2d_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+InterpFromMeshToMesh3d_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 IssmConfig_matlab_la_SOURCES = ../IssmConfig/IssmConfig.cpp
@@ -193,50 +206,46 @@
 IssmConfig_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB)
 
-ExpSimplify_matlab_la_SOURCES = ../ExpSimplify/ExpSimplify.cpp
-ExpSimplify_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-ExpSimplify_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB) $(NEOPZLIB)
-
 Kriging_matlab_la_SOURCES = ../Kriging/Kriging.cpp
 Kriging_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-Kriging_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB) $(MULTITHREADINGLIB) $(GSLLIB) $(PROJ4LIB)
+Kriging_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB) $(MULTITHREADINGLIB) $(GSLLIB) $(PROJ4LIB)
+
+M1qn3_matlab_la_SOURCES = ../M1qn3/M1qn3.cpp
+M1qn3_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
+M1qn3_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(METISLIB) $(M1QN3LIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 MeshPartition_matlab_la_SOURCES = ../MeshPartition/MeshPartition.cpp
 MeshPartition_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-MeshPartition_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(METISLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
-
-M1qn3_matlab_la_SOURCES = ../M1qn3/M1qn3.cpp
-M1qn3_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-M1qn3_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(METISLIB) $(M1QN3LIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+MeshPartition_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(METISLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 MeshProfileIntersection_matlab_la_SOURCES = ../MeshProfileIntersection/MeshProfileIntersection.cpp
 MeshProfileIntersection_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-MeshProfileIntersection_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+MeshProfileIntersection_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 NodeConnectivity_matlab_la_SOURCES = ../NodeConnectivity/NodeConnectivity.cpp
 NodeConnectivity_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-NodeConnectivity_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+NodeConnectivity_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 PointCloudFindNeighbors_matlab_la_SOURCES = ../PointCloudFindNeighbors/PointCloudFindNeighbors.cpp
 PointCloudFindNeighbors_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-PointCloudFindNeighbors_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+PointCloudFindNeighbors_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 PropagateFlagsFromConnectivity_matlab_la_SOURCES = ../PropagateFlagsFromConnectivity/PropagateFlagsFromConnectivity.cpp
 PropagateFlagsFromConnectivity_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-PropagateFlagsFromConnectivity_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+PropagateFlagsFromConnectivity_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 Scotch_matlab_la_SOURCES = ../Scotch/Scotch.cpp
 Scotch_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-Scotch_matlab_la_LIBADD = ${deps} $(SCOTCHLIB) $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(BLASLAPACKLIB)
+Scotch_matlab_la_LIBADD = ${deps}  $(PETSCLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(BLASLAPACKLIB)
 
 ShpRead_matlab_la_SOURCES = ../ShpRead/ShpRead.cpp
 ShpRead_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-ShpRead_matlab_la_LIBADD = ${deps} $(SHAPELIBLIB) $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+ShpRead_matlab_la_LIBADD = ${deps} $(SHAPELIBLIB) $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 Triangle_matlab_la_SOURCES = ../Triangle/Triangle.cpp
 Triangle_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-Triangle_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(TRIANGLELIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+Triangle_matlab_la_LIBADD = ${deps} $(TRIANGLELIB) $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 ProcessRifts_matlab_la_SOURCES = ../ProcessRifts/ProcessRifts.cpp
 ProcessRifts_matlab_la_CXXFLAGS = ${AM_CXXFLAGS}
-ProcessRifts_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
-#}}}
+ProcessRifts_matlab_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+#}}}
Index: /issm/trunk/src/wrappers/python/Makefile.am
===================================================================
--- /issm/trunk/src/wrappers/python/Makefile.am	(revision 24685)
+++ /issm/trunk/src/wrappers/python/Makefile.am	(revision 24686)
@@ -5,14 +5,16 @@
 
 #define prefix (from http://www.gnu.org/software/autoconf/manual/autoconf-2.67/html_node/Defining-Directories.html)
-AM_CPPFLAGS+=  -DISSM_PREFIX='"$(prefix)"'
+AM_CPPFLAGS += -DISSM_PREFIX='"$(prefix)"'
 
 #python io{{{
 lib_LTLIBRARIES = libISSMPython.la
 
-io_sources= ./io/WritePythonData.cpp\
-				./io/CheckNumPythonArguments.cpp\
-				./io/FetchPythonData.cpp
+io_sources = \
+	./io/CheckNumPythonArguments.cpp \
+	./io/FetchPythonData.cpp \
+	./io/WritePythonData.cpp
 
-ALLCXXFLAGS= -fPIC -D_WRAPPERS_ $(CXXFLAGS) $(CXXOPTFLAGS) 
+ALLCXXFLAGS= -fPIC -D_WRAPPERS_ $(CXXFLAGS) $(CXXOPTFLAGS)
+
 libISSMPython_la_SOURCES = $(io_sources)
 libISSMPython_la_CXXFLAGS= $(ALLCXXFLAGS)
@@ -20,6 +22,5 @@
 #api io{{{
 lib_LTLIBRARIES += libISSMApi.la
-
-api_sources= ./io/ApiPrintf.cpp
+api_sources		 = ./io/ApiPrintf.cpp
 
 libISSMApi_la_SOURCES = $(api_sources)
@@ -27,145 +28,158 @@
 #}}}
 #Wrappers {{{
-if WRAPPERS
-lib_LTLIBRARIES += BamgConvertMesh_python.la\
-						BamgMesher_python.la\
-						BamgTriangulate_python.la\
-						ContourToMesh_python.la\
-						ContourToNodes_python.la\
-						ElementConnectivity_python.la\
-						ExpToLevelSet_python.la\
-						InterpFromMeshToMesh2d_python.la\
-						InterpFromMeshToMesh3d_python.la\
-						InterpFromGridToMesh_python.la\
-						InterpFromMeshToGrid_python.la\
-						IssmConfig_python.la\
-						MeshPartition_python.la\
-						MeshProfileIntersection_python.la\
-						NodeConnectivity_python.la\
-						Triangle_python.la\
-						ProcessRifts_python.la
+lib_LTLIBRARIES += \
+	BamgConvertMesh_python.la \
+	BamgMesher_python.la \
+	BamgTriangulate_python.la \
+	ContourToMesh_python.la \
+	ContourToNodes_python.la \
+	ElementConnectivity_python.la \
+	ExpToLevelSet_python.la \
+	InterpFromMeshToMesh2d_python.la \
+	InterpFromMeshToMesh3d_python.la \
+	InterpFromGridToMesh_python.la \
+	InterpFromMeshToGrid_python.la \
+	IssmConfig_python.la \
+	MeshPartition_python.la \
+	MeshProfileIntersection_python.la \
+	NodeConnectivity_python.la \
+	Triangle_python.la \
+	ProcessRifts_python.la
 
 if CHACO
 lib_LTLIBRARIES += Chaco_python.la
 endif
-endif 
 #}}}
 #Flags and libraries {{{
-deps =  $(MATHLIB) ${PYTHONLIB}
+deps = $(MATHLIB) ${PYTHONLIB}
 
 #Triangle library
-AM_CXXFLAGS =  -DTRILIBRARY -DANSI_DECLARATORS -DNO_TIMER -D_WRAPPERS_
+AM_CXXFLAGS = -DTRILIBRARY -DANSI_DECLARATORS -DNO_TIMER -D_WRAPPERS_
 
 #Python part
-AM_LDFLAGS   = $(PYTHONLINK) -shrext ${EXEEXT} -module
-if VERSION
-AM_LDFLAGS +=
-else
+AM_LDFLAGS = $(PYTHONLINK) -shrext ${EXEEXT} -module
+
+if !VERSION
 AM_LDFLAGS += -avoid-version
 endif
 
+# NOTE:
+# - On Linux, We probably do not need the -static flag as long as we only
+#	generate static libraries for external packages. Dynamic system libraries
+#	will be linked to, whether we like it or not, if no static version is
+#	available.
+# - On macOS, static linking of binaries is not supported.
+#
 if STANDALONE_MODULES
-AM_LDFLAGS += -Wl,-static
+if MAC
+AM_LDFLAGS += -Wl,-rpath,'@rpath'
+else
+AM_LDFLAGS += -Wl,-static -Wl,--disable-new-dtags -Wl,-rpath,'$$ORIGIN'
+endif
 endif
 
-deps += ./libISSMPython.la 
-AM_LDFLAGS += --no-warnings 
+AM_LDFLAGS 	+= --no-warnings
+AM_CXXFLAGS += -D_HAVE_PYTHON_MODULES_ -fPIC
 
-AM_CXXFLAGS +=  -D_HAVE_PYTHON_MODULES_   -fPIC
 if PYTHON3
-AM_CXXFLAGS +=  -DNPY_NO_DEPRECATED_API 
+AM_CXXFLAGS += -DNPY_NO_DEPRECATED_API
 endif
 
-deps += ../../c/libISSMModules.la ../../c/libISSMCore.la
+deps += ./libISSMPython.la ../../c/libISSMModules.la ../../c/libISSMCore.la ./libISSMApi.la
+
 if ADOLC
 deps += $(ADOLCLIB)
 endif
 
-deps += ./libISSMApi.la 
-
-if STANDALONE_LIBRARIES
-libISSMPython_la_LDFLAGS = -static 
-endif
-if STANDALONE_LIBRARIES
-libISSMApi_la_LDFLAGS = -static 
+if FORTRAN
+deps += $(FLIBS) $(FORTRANLIB)
 endif
 
 #Optimization flags:
-AM_CXXFLAGS += $(CXXOPTFLAGS) 
+AM_CXXFLAGS += $(CXXOPTFLAGS)
 #}}}
-#Bin sources {{{
+# Module sources and dependencies {{{
+libISSMPython_la_LIBADD = ./../../c/libISSMCore.la ./../../c/libISSMModules.la $(PETSCLIB) $(MPILIB) $(NEOPZLIB) $(MKLLIB) $(GSLLIB) $(PROJ4LIB) $(MATHLIB)
+
+if STANDALONE_LIBRARIES
+libISSMPython_la_LDFLAGS = -static
+libISSMApi_la_LDFLAGS = -static
+deps += $(DAKOTALIB) $(PETSCLIB) $(TAOLIB) $(NEOPZLIB) $(M1QN3LIB) $(SEMICLIB) $(PLAPACKLIB) $(MUMPSLIB) $(SUPERLULIB) $(SPOOLESLIB) $(TRIANGLELIB) $(SCALAPACKLIB) $(BLACSLIB) $(HYPRELIB) $(SPAILIB) $(PROMETHEUSLIB) $(PASTIXLIB) $(MLLIB) $(METISLIB) $(CHACOLIB) $(SCOTCHLIB) $(BLASLAPACKLIB) $(MKLLIB) $(MPILIB) $(MATHLIB) $(GRAPHICSLIB) $(MULTITHREADINGLIB) $(OSLIBS) $(GSLLIB) $(ADOLCLIB) $(AMPILIB) $(METEOIOLIB) $(SNOWPACKLIB)
+endif
+
+libISSMApi_la_LIBADD = $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB) $(MATHLIB)
+
 BamgConvertMesh_python_la_SOURCES = ../BamgConvertMesh/BamgConvertMesh.cpp
 BamgConvertMesh_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgConvertMesh_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+BamgConvertMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 BamgMesher_python_la_SOURCES = ../BamgMesher/BamgMesher.cpp
 BamgMesher_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgMesher_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+BamgMesher_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 BamgTriangulate_python_la_SOURCES = ../BamgTriangulate/BamgTriangulate.cpp
 BamgTriangulate_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-BamgTriangulate_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+BamgTriangulate_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 if CHACO
 Chaco_python_la_SOURCES = ../Chaco/Chaco.cpp
 Chaco_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-Chaco_python_la_LIBADD = ${deps} $(MPILIB)  $(CHACOLIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+Chaco_python_la_LIBADD = ${deps} $(CHACOLIB) $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 endif
 
 ContourToMesh_python_la_SOURCES = ../ContourToMesh/ContourToMesh.cpp
 ContourToMesh_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ContourToMesh_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+ContourToMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 ContourToNodes_python_la_SOURCES = ../ContourToNodes/ContourToNodes.cpp
 ContourToNodes_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ContourToNodes_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+ContourToNodes_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 ElementConnectivity_python_la_SOURCES = ../ElementConnectivity/ElementConnectivity.cpp
 ElementConnectivity_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ElementConnectivity_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+ElementConnectivity_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 ExpToLevelSet_python_la_SOURCES = ../ExpToLevelSet/ExpToLevelSet.cpp
 ExpToLevelSet_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ExpToLevelSet_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+ExpToLevelSet_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
+InterpFromGridToMesh_python_la_SOURCES = ../InterpFromGridToMesh/InterpFromGridToMesh.cpp
+InterpFromGridToMesh_python_la_CXXFLAGS = ${AM_CXXFLAGS}
+InterpFromGridToMesh_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
+
+InterpFromMeshToGrid_python_la_SOURCES = ../InterpFromMeshToGrid/InterpFromMeshToGrid.cpp
+InterpFromMeshToGrid_python_la_CXXFLAGS = ${AM_CXXFLAGS}
+InterpFromMeshToGrid_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 InterpFromMeshToMesh2d_python_la_SOURCES = ../InterpFromMeshToMesh2d/InterpFromMeshToMesh2d.cpp
 InterpFromMeshToMesh2d_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToMesh2d_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(PROJ4LIB)
+InterpFromMeshToMesh2d_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(GSLLIB) $(PROJ4LIB)
 
 InterpFromMeshToMesh3d_python_la_SOURCES = ../InterpFromMeshToMesh3d/InterpFromMeshToMesh3d.cpp
 InterpFromMeshToMesh3d_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToMesh3d_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(PROJ4LIB)
-
-InterpFromGridToMesh_python_la_SOURCES = ../InterpFromGridToMesh/InterpFromGridToMesh.cpp
-InterpFromGridToMesh_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromGridToMesh_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(PROJ4LIB)
-
-InterpFromMeshToGrid_python_la_SOURCES = ../InterpFromMeshToGrid/InterpFromMeshToGrid.cpp
-InterpFromMeshToGrid_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-InterpFromMeshToGrid_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(MULTITHREADINGLIB) $(GSLLIB) $(PROJ4LIB)
+InterpFromMeshToMesh3d_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(MULTITHREADINGLIB) $(GSLLIB) $(PROJ4LIB)
 
 IssmConfig_python_la_SOURCES = ../IssmConfig/IssmConfig.cpp
 IssmConfig_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-IssmConfig_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB)
+IssmConfig_python_la_LIBADD = ${deps} $(DAKOTALIB) $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB)
 
 MeshPartition_python_la_SOURCES = ../MeshPartition/MeshPartition.cpp
 MeshPartition_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-MeshPartition_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+MeshPartition_python_la_LIBADD = ${deps} $(PETSCLIB) $(METISLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 MeshProfileIntersection_python_la_SOURCES = ../MeshProfileIntersection/MeshProfileIntersection.cpp
 MeshProfileIntersection_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-MeshProfileIntersection_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+MeshProfileIntersection_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 NodeConnectivity_python_la_SOURCES = ../NodeConnectivity/NodeConnectivity.cpp
 NodeConnectivity_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-NodeConnectivity_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+NodeConnectivity_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 Triangle_python_la_SOURCES = ../Triangle/Triangle.cpp
 Triangle_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-Triangle_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(TRIANGLELIB) $(GSLLIB) $(PROJ4LIB)
+Triangle_python_la_LIBADD = ${deps} $(TRIANGLELIB) $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 
 ProcessRifts_python_la_SOURCES = ../ProcessRifts/ProcessRifts.cpp
 ProcessRifts_python_la_CXXFLAGS = ${AM_CXXFLAGS}
-ProcessRifts_python_la_LIBADD = ${deps} $(MPILIB) $(PETSCLIB) $(GSLLIB) $(PROJ4LIB)
+ProcessRifts_python_la_LIBADD = ${deps} $(PETSCLIB) $(BLASLAPACKLIB) $(MPILIB) $(NEOPZLIB) $(GSLLIB) $(PROJ4LIB)
 #}}}
Index: /issm/trunk/src/wrappers/python/io/FetchPythonData.cpp
===================================================================
--- /issm/trunk/src/wrappers/python/io/FetchPythonData.cpp	(revision 24685)
+++ /issm/trunk/src/wrappers/python/io/FetchPythonData.cpp	(revision 24686)
@@ -1296,11 +1296,9 @@
 void FetchData(char** pstring,PyObject* py_string){
 
-	char* string=NULL;
-
 	/*extract internal string: */
 	#if _PYTHON_MAJOR_ == 3
-	string=PyUnicode_AsUTF8(py_string);
+	const char* string=PyUnicode_AsUTF8(py_string);
 	#else
-	string=PyString_AsString(py_string);
+	char* string=PyString_AsString(py_string);
 	#endif
 	/*copy string (note strlen does not include trailing NULL): */
Index: /issm/trunk/test/NightlyRun/GetIds.py
===================================================================
--- /issm/trunk/test/NightlyRun/GetIds.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/GetIds.py	(revision 24686)
@@ -44,4 +44,6 @@
         if np.array([type(i) == int for i in ids_names]).all():
             ids = ids_names
+        if np.array([type(i) == np.int64 for i in ids_names]).all():
+            ids = ids_names
         elif np.array([type(i) == str for i in ids_names]).all():
             ids = np.concatenate([IdFromString(i) for i in ids_names])
Index: /issm/trunk/test/NightlyRun/Makefile
===================================================================
--- /issm/trunk/test/NightlyRun/Makefile	(revision 24685)
+++ /issm/trunk/test/NightlyRun/Makefile	(revision 24686)
@@ -1,2 +1,2 @@
 clean: 
-	rm -rf *.tar.gz *.bin *.errlog *.outlog  *.outbin *.petsc *.queue *.toolkits *.run valgrind.log* *.bat *.lock qmu*
+	rm -rf *.tar.gz *.bin *.errlog *.outlog  *.outbin *.petsc *.queue *.toolkits *.run valgrind.log* *.bat *.lock qmu* dakota_tabular.dat
Index: /issm/trunk/test/NightlyRun/runme.py
===================================================================
--- /issm/trunk/test/NightlyRun/runme.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/runme.py	(revision 24686)
@@ -144,16 +144,16 @@
                 if os.path.isfile(archive_file):
                     os.remove(archive_file)
-                    for k, fieldname in enumerate(field_names):
-                        field = np.array(field_values[k], dtype=float)
-                        if len(field.shape) == 1:
-                            if np.size(field):
-                                field = field.reshape(np.size(field), 1)
-                            else:
-                                field = field.reshape(0, 0)
-                        elif len(field.shape) == 0:
-                            field = field.reshape(1, 1)
-                            # Matlab uses base 1, so use base 1 in labels
-                        archwrite(archive_file, archive_name + '_field' + str(k + 1), field)
-                    print(("File {} saved. \n".format(os.path.join('..', 'Archives', archive_name + '.arch'))))
+                for k, fieldname in enumerate(field_names):
+                    field = np.array(field_values[k], dtype=float)
+                    if len(field.shape) == 1:
+                        if np.size(field):
+                            field = field.reshape(np.size(field), 1)
+                        else:
+                            field = field.reshape(0, 0)
+                    elif len(field.shape) == 0:
+                        field = field.reshape(1, 1)
+                        # Matlab uses base 1, so use base 1 in labels
+                    archwrite(archive_file, archive_name + '_field' + str(k + 1), field)
+                print(("File {} saved. \n".format(os.path.join('..', 'Archives', archive_name + '.arch'))))
 
                     #ELSE: CHECK TEST
Index: /issm/trunk/test/NightlyRun/test1101.py
===================================================================
--- /issm/trunk/test/NightlyRun/test1101.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test1101.py	(revision 24686)
@@ -41,5 +41,5 @@
     md.stressbalance.spcvy[pos] = 0.
 
-#Create MPCs to have periodic boundary conditions
+#Create MPCs to have periodic boundary conditions; this is done using Matlab indexing
     posx = np.where(md.mesh.x == 0.)[0]
     posx2 = np.where(md.mesh.x == np.max(md.mesh.x))[0]
Index: /issm/trunk/test/NightlyRun/test1104.py
===================================================================
--- /issm/trunk/test/NightlyRun/test1104.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test1104.py	(revision 24686)
@@ -44,5 +44,5 @@
 
     md.stressbalance.vertex_pairing = np.vstack((np.vstack((posx + 1, posx2 + 1)).T, np.vstack((posy + 1, posy2 + 1)).T))
-    print(np.shape(md.stressbalance.vertex_pairing))
+
 #Compute the stressbalance
     md.stressbalance.abstol = np.nan
@@ -62,5 +62,5 @@
     results.append(md.results.StressbalanceSolution)
 
-#       plotmodel(md, 'data', vx, 'data', vy, 'data', vz, 'layer  #all', md.mesh.numberoflayers)
+#       plotmodel(md, 'data', vx, 'data', vy, 'data', vz, 'layer#all', md.mesh.numberoflayers)
 
 #Fields and tolerances to track changes
Index: /issm/trunk/test/NightlyRun/test1110.m
===================================================================
--- /issm/trunk/test/NightlyRun/test1110.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test1110.m	(revision 24686)
@@ -1,9 +1,9 @@
 %Test Name: ISMIPF
 %This test is a test from the ISMP-HOM Intercomparison project.
-%TestF 
+%TestF
 printingflag=false;
 results={};
 
-for i=1:4,
+for i=3,  %1:4,
 	L=100000.; %in m
 	nx=30; %numberof nodes in x direction
@@ -55,5 +55,5 @@
 	md.masstransport.stabilization=1;
 	md.stressbalance.maxiter=1;
-	
+
 	%Compute the stressbalance
 	md.cluster=generic('name',oshostname(),'np',8);
@@ -63,6 +63,6 @@
 	%save the results
 	results{i}=md.results.TransientSolution(end);
-	
-	%Now plot vx and delta surface 
+
+	%Now plot vx and delta surface
 	if (i==1 | i==3),
 		plotmodel(md,'data',(md.results.TransientSolution(end).Vx),'layer',md.mesh.numberoflayers,'sectionvalue','../Exp/ISMIP100000.exp','title','','xlabel','','ylabel','Velocity (m/yr)','linewidth',3,'grid','on','unit','km','ylim',[91 100])
Index: /issm/trunk/test/NightlyRun/test1110.py
===================================================================
--- /issm/trunk/test/NightlyRun/test1110.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test1110.py	(revision 24686)
@@ -4,9 +4,9 @@
 from socket import gethostname
 from bamg import *
-from setmask import *
-from parameterize import *
-from setflowequation import *
-from solve import *
-from squaremesh import *
+from setmask import setmask
+from parameterize import parameterize
+from setflowequation import setflowequation
+from solve import solve
+from squaremesh import squaremesh
 
 #This test is a test from the ISMP - HOM Intercomparison project.
@@ -15,5 +15,5 @@
 results = []
 
-for i in range(4):
+for i in [1]:  #range(4):
     L = 100000.  #in m
     nx = 30  #numberof nodes in x direction
@@ -25,5 +25,4 @@
     md = parameterize(md, '../Par/ISMIPF.py')
     md = md.extrude(4, 1.)
-
     if (i == 0 or i == 1):
         md = setflowequation(md, 'HO', 'all')
@@ -42,6 +41,6 @@
     else:
         posA = np.where(md.mesh.vertexonbase)
-        posB = np.unique(np.concatenate(np.where(md.mesh.x == 0.), np.where(md.mesh.x == max(md.mesh.x))))
-        posC = np.unique(np.concatenate(np.where(md.mesh.y == 0.), np.where(md.mesh.y == max(md.mesh.y))))
+        posB = np.unique(np.hstack((np.where(md.mesh.x == 0.), np.where(md.mesh.x == np.nanmax(md.mesh.x)))))
+        posC = np.unique(np.hstack((np.where(md.mesh.y == 0.), np.where(md.mesh.y == np.nanmax(md.mesh.y)))))
         pos = np.intersect1d(np.intersect1d(posA, posB), posC)
         md.stressbalance.spcvx[pos] = 100.  #because we need a dirichlet somewhere
@@ -53,16 +52,17 @@
 
     #Create MPCs to have periodic boundary conditions
-    posx = np.where(md.mesh.x == 0.)
-    posx2 = np.where(md.mesh.x == max(md.mesh.x))
+    posx = np.where(md.mesh.x == 0.)[0]
+    posx2 = np.where(md.mesh.x == max(md.mesh.x))[0]
+    # posy = np.where(np.logical_and.reduce((md.mesh.y == 0., md.mesh.x != 0., md.mesh.x != np.max(md.mesh.x))))[0]  #Don't take the same nodes two times
+    # posy2 = np.where(np.logical_and.reduce((md.mesh.y == np.max(md.mesh.y), md.mesh.x != 0., md.mesh.x != np.max(md.mesh.x))))[0]
+    posy = np.where(md.mesh.y == 0)[0]
+    posy2 = np.where(md.mesh.y == np.max(md.mesh.y))[0]
 
-    posy = np.where(md.mesh.y == 0.)
-    posy2 = np.where(md.mesh.y == max(md.mesh.y))
-
-    md.stressbalance.vertex_pairing = np.column_stack((posx, posx2, posy, posy2))
-    md.masstransport.vertex_pairing = np.column_stack((posx, posx2, posy, posy2))
+    md.stressbalance.vertex_pairing = np.vstack((np.vstack((posx + 1, posx2 + 1)).T, np.vstack((posy + 1, posy2 + 1)).T))
+    md.masstransport.vertex_pairing = np.vstack((np.vstack((posx + 1, posx2 + 1)).T, np.vstack((posy + 1, posy2 + 1)).T))
 
     md.timestepping.time_step = 3.
-    md.timestepping.final_time = 300.
-    md.settings.output_frequency = 50
+    md.timestepping.final_time = 30.  #300.
+    md.settings.output_frequency = 5  #50
     md.masstransport.stabilization = 1
     md.stressbalance.maxiter = 1
@@ -74,9 +74,9 @@
 
     #save the results
-    results[i] = md.results.TransientSolution()
+    results = np.append(results, md.results.TransientSolution[-1])
 
     #Now plot vx and delta surface
     if (i == 0 or i == 2):
-        plotmodel(md, 'data', (md.results.TransientSolution().Vx),
+        plotmodel(md, 'data', np.squeeze(md.results.TransientSolution[-1].Vx),
                   'layer', md.mesh.numberoflayers,
                   'sectionvalue', '../Exp/ISMIP100000.exp',
@@ -89,5 +89,5 @@
                   'ylim', [91, 100])
     elif (i == 1 or i == 3):
-        plotmodel(md, 'data', (md.results.TransientSolution().Vx),
+        plotmodel(md, 'data', np.squeeze(md.results.TransientSolution[-1].Vx),
                   'layer', md.mesh.numberoflayers,
                   'sectionvalue', '../Exp/ISMIP100000.exp',
@@ -115,5 +115,5 @@
             #system(['mv ismipfFSvxsliding.png ' ISSM_DIR '/website/doc_pdf/validation/Images/ISMIP/TestF'])
 
-    plotmodel(md, 'data', (md.results.TransientSolution().Surface) - md.geometry.surface,
+    plotmodel(md, 'data', np.squeeze(md.results.TransientSolution[-1].Surface) - md.geometry.surface,
               'layer', md.mesh.numberoflayers,
               'sectionvalue', '../Exp/ISMIP100000.exp',
Index: /issm/trunk/test/NightlyRun/test124.js
===================================================================
--- /issm/trunk/test/NightlyRun/test124.js	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test124.js	(revision 24686)
@@ -24,5 +24,5 @@
 	2e-09,3e-9,3e-9,3e-9,1e-13,1e-12,1e-12,
 	2e-09,3e-9,3e-9,3e-9,1e-10,1e-10,1e-10,
-	2e-09,3e-9,3e-9,3e-9,1e-10,1e-10,1e-10];
+	3e-09,3e-9,3e-9,3e-9,1e-10,1e-10,1e-10];
 field_values=[
 	(md.results.TransientSolution[0](1).Vx),
Index: /issm/trunk/test/NightlyRun/test124.m
===================================================================
--- /issm/trunk/test/NightlyRun/test124.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test124.m	(revision 24686)
@@ -23,5 +23,5 @@
 	2e-09,3e-9,3e-9,3e-9,1e-13,1e-12,1e-12,...
 	2e-09,3e-9,3e-9,3e-9,1e-10,1e-10,1e-10,...
-	2e-09,3e-9,3e-9,3e-9,1e-10,1e-10,1e-10};
+	3e-09,3e-9,3e-9,3e-9,1e-10,1e-10,1e-10};
 field_values={...
 	(md.results.TransientSolution(1).Vx),...
Index: /issm/trunk/test/NightlyRun/test124.py
===================================================================
--- /issm/trunk/test/NightlyRun/test124.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test124.py	(revision 24686)
@@ -29,5 +29,5 @@
 field_tolerances = [2e-09, 3e-9, 3e-9, 3e-9, 1e-13, 1e-12, 1e-12,
                     2e-09, 3e-9, 3e-9, 3e-9, 1e-10, 1e-10, 1e-10,
-                    2e-09, 3e-9, 3e-9, 3e-9, 1e-10, 1e-10, 1e-10]
+                    3e-09, 3e-9, 3e-9, 3e-9, 1e-10, 1e-10, 1e-10]
 field_values = [md.results.TransientSolution[0].Vx,
                 md.results.TransientSolution[0].Vy,
Index: /issm/trunk/test/NightlyRun/test2002.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2002.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test2002.m	(revision 24686)
@@ -9,5 +9,7 @@
 md.slr.deltathickness=zeros(md.mesh.numberofelements,1);
 md.slr.sealevel=zeros(md.mesh.numberofvertices,1);
-md.slr.steric_rate=zeros(md.mesh.numberofvertices,1);
+md.dsl.global_average_thermosteric_sea_level_change=[0;0];
+md.dsl.sea_surface_height_change_above_geoid=zeros(md.mesh.numberofvertices+1,1);
+md.dsl.sea_water_pressure_change_at_sea_floor=zeros(md.mesh.numberofvertices+1,1);
 %antarctica
 late=sum(md.mesh.lat(md.mesh.elements),2)/3;
Index: /issm/trunk/test/NightlyRun/test2002.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2002.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test2002.py	(revision 24686)
@@ -18,5 +18,8 @@
 md.slr.deltathickness = np.zeros((md.mesh.numberofelements))
 md.slr.sealevel = np.zeros((md.mesh.numberofvertices))
-md.slr.steric_rate = np.zeros((md.mesh.numberofvertices))
+md.dsl.global_average_thermosteric_sea_level_change=np.zeros((2, ))
+md.dsl.sea_surface_height_change_above_geoid=np.zeros((md.mesh.numberofvertices+1, ))
+md.dsl.sea_water_pressure_change_at_sea_floor=np.zeros((md.mesh.numberofvertices+1, ))
+
 
 #antarctica
Index: /issm/trunk/test/NightlyRun/test2003.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2003.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test2003.m	(revision 24686)
@@ -9,5 +9,8 @@
 md.slr.deltathickness=zeros(md.mesh.numberofelements,1);
 md.slr.sealevel=zeros(md.mesh.numberofvertices,1);
-md.slr.steric_rate=zeros(md.mesh.numberofvertices,1);
+md.dsl.global_average_thermosteric_sea_level_change=[0;0];
+md.dsl.sea_surface_height_change_above_geoid=zeros(md.mesh.numberofvertices+1,1);
+md.dsl.sea_water_pressure_change_at_sea_floor=zeros(md.mesh.numberofvertices+1,1);
+
 %antarctica
 late=sum(md.mesh.lat(md.mesh.elements),2)/3;
Index: /issm/trunk/test/NightlyRun/test2003.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2003.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test2003.py	(revision 24686)
@@ -18,5 +18,8 @@
 md.slr.deltathickness = np.zeros((md.mesh.numberofelements, ))
 md.slr.sealevel = np.zeros((md.mesh.numberofvertices, ))
-md.slr.steric_rate = np.zeros((md.mesh.numberofvertices, ))
+md.dsl.global_average_thermosteric_sea_level_change=np.zeros((2, ))
+md.dsl.sea_surface_height_change_above_geoid=np.zeros((md.mesh.numberofvertices+1, ))
+md.dsl.sea_water_pressure_change_at_sea_floor=np.zeros((md.mesh.numberofvertices+1, ))
+
 
 #antarctica
Index: /issm/trunk/test/NightlyRun/test2010.m
===================================================================
--- /issm/trunk/test/NightlyRun/test2010.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test2010.m	(revision 24686)
@@ -16,5 +16,8 @@
 
 md.slr.sealevel=zeros(md.mesh.numberofvertices,1);
-md.slr.steric_rate=zeros(md.mesh.numberofvertices,1);
+md.dsl.global_average_thermosteric_sea_level_change=[0;0];
+md.dsl.sea_surface_height_change_above_geoid=zeros(md.mesh.numberofvertices+1,1);
+md.dsl.sea_water_pressure_change_at_sea_floor=zeros(md.mesh.numberofvertices+1,1);
+
 md.slr.ocean_area_scaling = 1;
 
Index: /issm/trunk/test/NightlyRun/test2010.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2010.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test2010.py	(revision 24686)
@@ -26,5 +26,8 @@
 
 md.slr.sealevel = np.zeros((md.mesh.numberofvertices, ))
-md.slr.steric_rate = np.zeros((md.mesh.numberofvertices, ))
+md.dsl.global_average_thermosteric_sea_level_change=np.zeros((2, ))
+md.dsl.sea_surface_height_change_above_geoid=np.zeros((md.mesh.numberofvertices+1, ))
+md.dsl.sea_water_pressure_change_at_sea_floor=np.zeros((md.mesh.numberofvertices+1, ))
+
 md.slr.ocean_area_scaling = 1
 
Index: /issm/trunk/test/NightlyRun/test220.js
===================================================================
--- /issm/trunk/test/NightlyRun/test220.js	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test220.js	(revision 24686)
@@ -11,11 +11,10 @@
 
 //Fields and tolerances to track changes
-field_names     =['Vx','Vy','Vz','Vel','Pressure'];
-field_tolerances=[1e-09,1e-09,5e-06,1e-09,1e-09];
+field_names     =['Vx','Vy','Vz','Vel'];
+field_tolerances=[1e-09,1e-09,5e-06,1e-09];
 field_values=[
 	(md.results.StressbalanceSolution[0].Vx),
 	(md.results.StressbalanceSolution[0].Vy),
 	(md.results.StressbalanceSolution[0].Vz),
-	(md.results.StressbalanceSolution[0].Vel),
-	(md.results.StressbalanceSolution[0].Pressure),
-	];
+	(md.results.StressbalanceSolution[0].Vel)
+];
Index: /issm/trunk/test/NightlyRun/test220.m
===================================================================
--- /issm/trunk/test/NightlyRun/test220.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test220.m	(revision 24686)
@@ -10,11 +10,10 @@
 
 %Fields and tolerances to track changes
-field_names     ={'Vx','Vy','Vz','Vel','Pressure'};
-field_tolerances={1e-09,1e-09,5e-06,1e-09,1e-09};
+field_names     ={'Vx','Vy','Vz','Vel'};
+field_tolerances={1e-09,1e-09,5e-06,1e-09};
 field_values={...
 	(md.results.StressbalanceSolution.Vx),...
 	(md.results.StressbalanceSolution.Vy),...
 	(md.results.StressbalanceSolution.Vz),...
-	(md.results.StressbalanceSolution.Vel),...
-	(md.results.StressbalanceSolution.Pressure),...
-	};
+	(md.results.StressbalanceSolution.Vel)...
+};
Index: /issm/trunk/test/NightlyRun/test220.py
===================================================================
--- /issm/trunk/test/NightlyRun/test220.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test220.py	(revision 24686)
@@ -20,9 +20,9 @@
 # Fields and tolerances to track changes
 
-field_names = ['Vx', 'Vy', 'Vz', 'Vel', 'Pressure']
-field_tolerances = [1e-09, 1e-09, 5e-06, 1e-09, 1e-09]
+field_names = ['Vx', 'Vy', 'Vz', 'Vel']
+field_tolerances = [1e-09, 1e-09, 5e-06, 1e-09]
 field_values = [md.results.StressbalanceSolution.Vx,
                 md.results.StressbalanceSolution.Vy,
                 md.results.StressbalanceSolution.Vz,
-                md.results.StressbalanceSolution.Vel,
-                md.results.StressbalanceSolution.Pressure]
+                md.results.StressbalanceSolution.Vel
+]
Index: /issm/trunk/test/NightlyRun/test221.js
===================================================================
--- /issm/trunk/test/NightlyRun/test221.js	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test221.js	(revision 24686)
@@ -10,11 +10,10 @@
 
 //Fields and tolerances to track changes
-field_names     =['Vx','Vy','Vz','Vel','Pressure'];
-field_tolerances=[1e-09,1e-09,5e-06,1e-09,1e-09];
+field_names     =['Vx','Vy','Vz','Vel'];
+field_tolerances=[1e-09,1e-09,5e-06,1e-09];
 field_values=[
 	(md.results.StressbalanceSolution[0].Vx),
 	(md.results.StressbalanceSolution[0].Vy),
 	(md.results.StressbalanceSolution[0].Vz),
-	(md.results.StressbalanceSolution[0].Vel),
-	(md.results.StressbalanceSolution[0].Pressure),
-	];
+	(md.results.StressbalanceSolution[0].Vel)
+];
Index: /issm/trunk/test/NightlyRun/test221.m
===================================================================
--- /issm/trunk/test/NightlyRun/test221.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test221.m	(revision 24686)
@@ -9,6 +9,6 @@
 
 %Fields and tolerances to track changes
-field_names     ={'Vx','Vy','Vz','Vel','Pressure'};
-field_tolerances={1e-09,1e-09,5e-06,1e-09,1e-09};
+field_names     ={'Vx','Vy','Vz','Vel'};
+field_tolerances={1e-09,1e-09,5e-06,1e-09};
 field_values={...
 	(md.results.StressbalanceSolution.Vx),...
@@ -16,4 +16,3 @@
 	(md.results.StressbalanceSolution.Vz),...
 	(md.results.StressbalanceSolution.Vel),...
-	(md.results.StressbalanceSolution.Pressure),...
-	};
+};
Index: /issm/trunk/test/NightlyRun/test221.py
===================================================================
--- /issm/trunk/test/NightlyRun/test221.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test221.py	(revision 24686)
@@ -18,9 +18,9 @@
 
 #Fields and tolerances to track changes
-field_names = ['Vx', 'Vy', 'Vz', 'Vel', 'Pressure']
-field_tolerances = [1e-09, 1e-09, 5e-06, 1e-09, 1e-09]
+field_names = ['Vx', 'Vy', 'Vz', 'Vel']
+field_tolerances = [1e-09, 1e-09, 5e-06, 1e-09]
 field_values = [md.results.StressbalanceSolution.Vx,
                 md.results.StressbalanceSolution.Vy,
                 md.results.StressbalanceSolution.Vz,
-                md.results.StressbalanceSolution.Vel,
-                md.results.StressbalanceSolution.Pressure]
+                md.results.StressbalanceSolution.Vel
+]
Index: /issm/trunk/test/NightlyRun/test228.py
===================================================================
--- /issm/trunk/test/NightlyRun/test228.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test228.py	(revision 24686)
@@ -22,5 +22,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb.mass_balance = np.vstack((smb, [1.5, 3.]))
Index: /issm/trunk/test/NightlyRun/test230.py
===================================================================
--- /issm/trunk/test/NightlyRun/test230.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test230.py	(revision 24686)
@@ -23,5 +23,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb.mass_balance = np.vstack((smb, [1.5, 3.]))
Index: /issm/trunk/test/NightlyRun/test234.py
===================================================================
--- /issm/trunk/test/NightlyRun/test234.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test234.py	(revision 24686)
@@ -25,5 +25,5 @@
 
 smb = np.ones((md.mesh.numberofvertices, )) * 3.6
-smb = np.array([smb, smb * - 1]).T
+smb = np.array([smb, smb * -1]).T
 
 md.smb.mass_balance = smb
Index: /issm/trunk/test/NightlyRun/test235.py
===================================================================
--- /issm/trunk/test/NightlyRun/test235.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test235.py	(revision 24686)
@@ -25,5 +25,5 @@
 
 smb = np.ones((md.mesh.numberofvertices, )) * 3.6
-smb = np.array([smb, smb * - 1]).T
+smb = np.array([smb, smb * -1]).T
 
 md.smb.mass_balance = smb
Index: /issm/trunk/test/NightlyRun/test236.m
===================================================================
--- /issm/trunk/test/NightlyRun/test236.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test236.m	(revision 24686)
@@ -28,7 +28,4 @@
     md.smb.temperatures_presentday(1:md.mesh.numberofvertices,imonth+1)=tmonth(imonth+1);
     md.smb.temperatures_lgm(1:md.mesh.numberofvertices,imonth+1)=tmonth(imonth+1)-20.;
-    % Time for the last line:
-    md.smb.temperatures_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
-    md.smb.temperatures_lgm(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 
@@ -47,7 +44,4 @@
     md.smb.precipitations_presentday(1:md.mesh.numberofvertices,imonth+1)=-0.4*10^(-6)*md.mesh.y+0.5;
     md.smb.precipitations_lgm(1:md.mesh.numberofvertices,imonth+1)=-0.4*10^(-6)*md.mesh.y+0.5;
-    % Time for the last line:
-    md.smb.precipitations_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
-    md.smb.precipitations_lgm(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 
Index: /issm/trunk/test/NightlyRun/test236.py
===================================================================
--- /issm/trunk/test/NightlyRun/test236.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test236.py	(revision 24686)
@@ -30,12 +30,9 @@
 # Same temperature over the all region:
 tmonth = np.ones(12) * (238.15 + 20.)
-md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
-md.smb.temperatures_lgm = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices, 12))
+md.smb.temperatures_lgm = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.temperatures_presentday[0:md.mesh.numberofvertices, imonth] = tmonth[imonth]
     md.smb.temperatures_lgm[0:md.mesh.numberofvertices, imonth] = tmonth[imonth] - 20.
-# Time for the last line:
-    md.smb.temperatures_presentday[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
-    md.smb.temperatures_lgm[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
 
 # creating initialization and spc temperatures initialization and spc
@@ -49,11 +46,9 @@
 
 # creating precipitation
-md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
-md.smb.precipitations_lgm = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices, 12))
+md.smb.precipitations_lgm = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.precipitations_presentday[0:md.mesh.numberofvertices, imonth] = -0.4 * 10**(-6) * md.mesh.y + 0.5
-    md.smb.precipitations_presentday[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
     md.smb.precipitations_lgm[0:md.mesh.numberofvertices, imonth] = -0.4 * 10**(-6) * md.mesh.y + 0.5
-    md.smb.precipitations_lgm[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
 
 # Interpolation factors
Index: /issm/trunk/test/NightlyRun/test237.m
===================================================================
--- /issm/trunk/test/NightlyRun/test237.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test237.m	(revision 24686)
@@ -22,7 +22,4 @@
     md.smb.temperatures_presentday(1:md.mesh.numberofvertices,imonth+1)=tmonth(imonth+1);
     md.smb.temperatures_lgm(1:md.mesh.numberofvertices,imonth+1)=tmonth(imonth+1)-20.;
-    % Time for the last line:
-    md.smb.temperatures_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
-    md.smb.temperatures_lgm(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 
@@ -40,7 +37,4 @@
     md.smb.precipitations_presentday(1:md.mesh.numberofvertices,imonth+1)=-0.4*10^(-6)*md.mesh.y+0.5;
     md.smb.precipitations_lgm(1:md.mesh.numberofvertices,imonth+1)=-0.4*10^(-6)*md.mesh.y+0.5;
-    % Time for the last line:
-    md.smb.precipitations_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
-    md.smb.precipitations_lgm(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 
Index: /issm/trunk/test/NightlyRun/test237.py
===================================================================
--- /issm/trunk/test/NightlyRun/test237.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test237.py	(revision 24686)
@@ -28,12 +28,9 @@
 # Same temperature over the all region:
 tmonth = np.ones(12) * (238.15 + 20.)
-md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
-md.smb.temperatures_lgm = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices, 12))
+md.smb.temperatures_lgm = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.temperatures_presentday[0:md.mesh.numberofvertices, imonth] = tmonth[imonth]
     md.smb.temperatures_lgm[0:md.mesh.numberofvertices, imonth] = tmonth[imonth] - 20.
-# Time for the last line:
-    md.smb.temperatures_presentday[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
-    md.smb.temperatures_lgm[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
 
 # creating initialization and spc temperatures initialization and spc
@@ -47,11 +44,9 @@
 
 # creating precipitation
-md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
-md.smb.precipitations_lgm = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices, 12))
+md.smb.precipitations_lgm = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.precipitations_presentday[0:md.mesh.numberofvertices, imonth] = -0.4 * 10**(-6) * md.mesh.y + 0.5
-    md.smb.precipitations_presentday[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
     md.smb.precipitations_lgm[0:md.mesh.numberofvertices, imonth] = -0.4 * 10**(-6) * md.mesh.y + 0.5
-    md.smb.precipitations_lgm[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
 
 fsize = int(md.timestepping.final_time / md.timestepping.time_step) + 2
Index: /issm/trunk/test/NightlyRun/test238.m
===================================================================
--- /issm/trunk/test/NightlyRun/test238.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test238.m	(revision 24686)
@@ -22,6 +22,4 @@
 for imonth=0:11
     md.smb.temperatures_presentday(1:md.mesh.numberofvertices,imonth+1)=tmonth(imonth+1);
-    % Time for the last line:
-    md.smb.temperatures_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 
@@ -34,6 +32,4 @@
 for imonth=0:11
     md.smb.precipitations_presentday(1:md.mesh.numberofvertices,imonth+1)=-0.4*10^(-6)*md.mesh.y+0.5;
-    % Time for the last line:
-    md.smb.precipitations_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 md.smb = initialize(md.smb,md);
Index: /issm/trunk/test/NightlyRun/test238.py
===================================================================
--- /issm/trunk/test/NightlyRun/test238.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test238.py	(revision 24686)
@@ -26,9 +26,7 @@
 # Same temperature over the all region:
 tmonth = np.ones(12) * (238.15 + 20.)
-md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.temperatures_presentday[0:md.mesh.numberofvertices, imonth] = tmonth[imonth]
-# Time for the last line:
-    md.smb.temperatures_presentday[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
 
 # creating initialization and spc temperatures initialization and spc
@@ -39,8 +37,7 @@
 
 # creating precipitation
-md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.precipitations_presentday[0:md.mesh.numberofvertices, imonth] = -0.4 * 10**(-6) * md.mesh.y + 0.5
-    md.smb.precipitations_presentday[md.mesh.numberofvertices, imonth] = (float(imonth) / 12.)
 
 # time steps and resolution
Index: /issm/trunk/test/NightlyRun/test239.m
===================================================================
--- /issm/trunk/test/NightlyRun/test239.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test239.m	(revision 24686)
@@ -22,6 +22,4 @@
 for imonth=0:11
     md.smb.temperatures_presentday(1:md.mesh.numberofvertices,imonth+1)=tmonth(imonth+1);
-    % Time for the last line:
-    md.smb.temperatures_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 
@@ -35,6 +33,4 @@
 for imonth=0:11
     md.smb.precipitations_presentday(1:md.mesh.numberofvertices,imonth+1)=-0.4*10^(-6)*md.mesh.y+0.5;
-    % Time for the last line:
-    md.smb.precipitations_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 md.smb = initialize(md.smb,md);
Index: /issm/trunk/test/NightlyRun/test239.py
===================================================================
--- /issm/trunk/test/NightlyRun/test239.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test239.py	(revision 24686)
@@ -26,9 +26,7 @@
 # Same temperature over the all region:
 tmonth = np.ones(12) * (238.15 + 20.)
-md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.temperatures_presentday[0:md.mesh.numberofvertices, imonth] = tmonth[imonth]
-# Time for the last line:
-    md.smb.temperatures_presentday[md.mesh.numberofvertices, imonth] = (float(imonth) / 12.)
 
 # creating initialization and spc temperatures initialization and spc
@@ -39,8 +37,7 @@
 
 # creating precipitation
-md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.precipitations_presentday[0:md.mesh.numberofvertices, imonth] = -0.4 * 10**(-6) * md.mesh.y + 0.5
-    md.smb.precipitations_presentday[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
 
 # time steps and resolution
Index: /issm/trunk/test/NightlyRun/test240.m
===================================================================
--- /issm/trunk/test/NightlyRun/test240.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test240.m	(revision 24686)
@@ -22,6 +22,4 @@
 for imonth=0:11
     md.smb.temperatures_presentday(1:md.mesh.numberofvertices,imonth+1)=tmonth(imonth+1);
-    % Time for the last line:
-    md.smb.temperatures_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 md.smb = initialize(md.smb,md);
@@ -35,6 +33,4 @@
 for imonth=0:11
     md.smb.precipitations_presentday(1:md.mesh.numberofvertices,imonth+1)=-0.4*10^(-6)*md.mesh.y+0.5;
-    % Time for the last line:
-    md.smb.precipitations_presentday(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 
Index: /issm/trunk/test/NightlyRun/test240.py
===================================================================
--- /issm/trunk/test/NightlyRun/test240.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test240.py	(revision 24686)
@@ -26,9 +26,7 @@
 # Same temperature over the all region:
 tmonth = np.ones(12) * (238.15 + 20.)
-md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.temperatures_presentday = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.temperatures_presentday[0:md.mesh.numberofvertices, imonth] = tmonth[imonth]
-# Time for the last line:
-    md.smb.temperatures_presentday[md.mesh.numberofvertices, imonth] = (float(imonth) / 12.)
 
 # creating initialization and spc temperatures initialization and spc
@@ -39,8 +37,7 @@
 
 # creating precipitation
-md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices + 1, 12))
+md.smb.precipitations_presentday = np.zeros((md.mesh.numberofvertices, 12))
 for imonth in range(0, 12):
     md.smb.precipitations_presentday[0:md.mesh.numberofvertices, imonth] = -0.4 * 10**(-6) * md.mesh.y + 0.5
-    md.smb.precipitations_presentday[md.mesh.numberofvertices, imonth] = ((float(imonth) + 1.) / 12.)
 
 # time steps and resolution
Index: /issm/trunk/test/NightlyRun/test241.py
===================================================================
--- /issm/trunk/test/NightlyRun/test241.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test241.py	(revision 24686)
@@ -23,5 +23,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb.mass_balance = np.vstack((smb, [1.5, 3.]))
Index: /issm/trunk/test/NightlyRun/test242.py
===================================================================
--- /issm/trunk/test/NightlyRun/test242.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test242.py	(revision 24686)
@@ -24,5 +24,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb.mass_balance = np.vstack((smb, [1.5, 3.]))
Index: /issm/trunk/test/NightlyRun/test2424.py
===================================================================
--- /issm/trunk/test/NightlyRun/test2424.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test2424.py	(revision 24686)
@@ -45,5 +45,5 @@
 #time is off by the year constant
 for i in range(nsteps):
-    field_names.append('Time-' + str(md.results.TransientSolution[i].time) + ' - yr - ice_levelset - S - sl - (1 - di) * H')
+    field_names.append('Time-' + str(md.results.TransientSolution[i].time) + '-yr-ice_levelset-S-sl-(1-di) * H')
     field_tolerances.append(1e-12)
     field_values.append(md.results.TransientSolution[i].MaskGroundediceLevelset.reshape(-1, ) - (md.geometry.surface - md.results.TransientSolution[i].Sealevel.reshape(-1, ) - (1 - md.materials.rho_ice / md.materials.rho_water) * md.geometry.thickness))
Index: /issm/trunk/test/NightlyRun/test245.m
===================================================================
--- /issm/trunk/test/NightlyRun/test245.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test245.m	(revision 24686)
@@ -18,7 +18,5 @@
 for imonth=0:11
     md.smb.monthlytemperatures(1:md.mesh.numberofvertices,imonth+1)=md.materials.meltingpoint+temp_ma_present+(temp_mj_present-temp_ma_present)*sin(double(imonth+1-4)*pi/6.0);
-    md.smb.monthlytemperatures(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
     md.smb.precipitation(1:md.mesh.numberofvertices,imonth+1)=precipitation;
-    md.smb.precipitation(md.mesh.numberofvertices+1,imonth+1)=((imonth+1)/12);
 end
 
Index: /issm/trunk/test/NightlyRun/test245.py
===================================================================
--- /issm/trunk/test/NightlyRun/test245.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test245.py	(revision 24686)
@@ -23,6 +23,6 @@
 
 
-md.smb.monthlytemperatures = np.empty((md.mesh.numberofvertices + 1, 12))
-md.smb.precipitation = np.empty((md.mesh.numberofvertices + 1, 12))
+md.smb.monthlytemperatures = np.empty((md.mesh.numberofvertices, 12))
+md.smb.precipitation = np.empty((md.mesh.numberofvertices, 12))
 temp_ma_present = -10. * np.ones((md.mesh.numberofvertices, )) - md.smb.rlaps * md.geometry.surface / 1000.
 temp_mj_present = 10. * np.ones((md.mesh.numberofvertices, )) - md.smb.rlaps * md.geometry.surface / 1000.
@@ -30,7 +30,5 @@
 for imonth in range(12):
     md.smb.monthlytemperatures[0:md.mesh.numberofvertices, imonth] = md.materials.meltingpoint + temp_ma_present + (temp_mj_present - temp_ma_present) * np.sin((imonth + 1. - 4.) * np.pi / 6.0)
-    md.smb.monthlytemperatures[md.mesh.numberofvertices, imonth] = ((imonth + 1) / 12.)
     md.smb.precipitation[0:md.mesh.numberofvertices, imonth] = precipitation
-    md.smb.precipitation[md.mesh.numberofvertices, imonth] = ((imonth + 1) / 12.)
 
 # time steps and resolution
Index: /issm/trunk/test/NightlyRun/test247.m
===================================================================
--- /issm/trunk/test/NightlyRun/test247.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test247.m	(revision 24686)
@@ -0,0 +1,163 @@
+%Test Name: SquareShelfTranIspddIsdeltaO18pdNoInterpSSA2d
+md=triangle(model(),'../Exp/Square.exp',60000.);
+md=setmask(md,'all','');
+md=parameterize(md,'../Par/SquareShelf.par');
+
+%md.verbose=verbose('all');
+
+% Use of ispdd and isdelta18o methods
+md.smb = SMBd18opdd();
+md.smb.isd18opd=1;
+
+% Add temperature, precipitation and delta18o needed to measure the surface mass balance
+%  creating delta18o
+load '../Data/delta18o.data'
+md.smb.delta18o=delta18o;
+md.smb.istemperaturescaled = 0;
+md.smb.isprecipscaled = 0;
+
+% creating Present day  temperatures
+% Same temperature over the all region:
+tmonth(1:12)=238.15+20.;
+for imonth=0:11
+	md.smb.temperatures_presentday(1:md.mesh.numberofvertices,imonth+1)=tmonth(imonth+1);
+end
+md.smb = initialize(md.smb,md);
+
+% creating precipitation
+for imonth=0:11
+	md.smb.precipitations_presentday(1:md.mesh.numberofvertices,imonth+1)=-0.4*10^(-6)*md.mesh.y+0.5;
+end
+
+% 3 total years of input
+md.smb.temperatures_reconstructed = nan(md.mesh.numberofvertices+1,12*3);
+md.smb.precipitations_reconstructed = nan(md.mesh.numberofvertices+1,12*3);
+
+md.smb.temperatures_reconstructed(1:end-1,1:12) = md.smb.temperatures_presentday(1:end,:);
+md.smb.temperatures_reconstructed(1:end-1,13:24) = md.smb.temperatures_presentday(1:end,:)+1.2;
+md.smb.temperatures_reconstructed(1:end-1,25:36) = md.smb.temperatures_presentday(1:end,:)-0.8;
+
+md.smb.precipitations_reconstructed(1:end-1,1:12) = md.smb.precipitations_presentday(1:end,:);
+md.smb.precipitations_reconstructed(1:end-1,13:24) = md.smb.precipitations_presentday(1:end,:)+0.1;
+md.smb.precipitations_reconstructed(1:end-1,25:36) = md.smb.precipitations_presentday(1:end,:)-0.1;
+
+tim1 = [1/12:1/12:1];
+
+md.smb.temperatures_reconstructed(end,1:12) = tim1; md.smb.temperatures_reconstructed(end,13:24) = tim1+3; md.smb.temperatures_reconstructed(end,25:36) = tim1+5;
+md.smb.precipitations_reconstructed(end,1:12) = tim1; md.smb.precipitations_reconstructed(end,13:24) = tim1+3; md.smb.precipitations_reconstructed(end,25:36) = tim1+5;
+
+% creating initialization and spc temperatures initialization and
+% spc
+md.thermal.spctemperature=mean(md.smb.temperatures_presentday(1:md.mesh.numberofvertices,1:12),2)-10; %-10*ones(md.mesh.numberofvertices,1);
+md.initialization.temperature=md.thermal.spctemperature; %md.smb.temperatures_presentday(1:md.mesh.numberofvertices,1);
+
+
+md.smb.s0p = (max(md.geometry.surface,0));
+md.smb.s0t = (max(md.geometry.surface,0));
+md.smb.issetpddfac = 1;
+md.smb.pddfac_snow =4.3;
+md.smb.pddfac_ice = 8.3;
+
+md=extrude(md,5,1.2);
+md=setflowequation(md,'HO','all');
+md.settings.results_on_nodes={'Temperature','Waterfraction','Enthalpy'};
+
+md.thermal.isenthalpy=1;
+md.thermal.isdynamicbasalspc=1;
+md.thermal.fe = 'P1xP2';
+%md.thermal.spctemperature(find(md.mesh.vertexonbase)) = NaN;
+
+md.initialization.waterfraction= zeros(md.mesh.numberofvertices,1);
+md.initialization.watercolumn  = zeros(md.mesh.numberofvertices,1);
+md.initialization.enthalpy = zeros(md.mesh.numberofvertices,1);
+md.thermal.isdrainicecolumn=0;
+
+md=solve(md,'thermal');
+
+md.initialization.temperature=md.results.ThermalSolution.Temperature;
+md.initialization.enthalpy=md.results.ThermalSolution.Enthalpy;
+%x = find(md.initialization.temperature<210);
+%md.initialization.temperature(x) = 268;
+md.materials.rheology_B = cuffey(md.initialization.temperature);
+
+% Friction
+TEMP = zeros(md.mesh.numberofvertices,1);
+TEMP(md.mesh.elements) = md.initialization.temperature(:,1:6);
+
+temperature =TEMP;
+pressure = md.initialization.pressure;%md.materials.rho_ice*md.constants.g*md.geometry.thickness;
+Tm = md.materials.meltingpoint-md.materials.beta*pressure;
+
+md.friction.coefficient = md.friction.coefficient;%averaging(md,md.friction.coefficient,2);
+md.friction=frictionjosh(md.friction);
+md.friction.pressure_adjusted_temperature = temperature - Tm;
+md.friction.gamma= 5;
+
+% time steps and resolution
+md.timestepping.time_step=0.5;
+md.settings.output_frequency=1;
+md.timestepping.final_time=2;
+md.timestepping.interp_forcings=0;
+
+%md.transient.requested_outputs={'default','IceVolumeAboveFloatation','IceVolume','TemperaturePDD','SmbMonthlytemperatures','SmbPrecipitation'};
+md.transient.requested_outputs={'default','IceVolumeAboveFloatation','IceVolume','TemperaturePDD'};
+md=setflowequation(md,'SSA','all');
+md.cluster=generic('name',oshostname(),'np',1); % 3 for the cluster
+md=solve(md,'Transient');
+
+field_names = {...
+	'Vx1','Vy1','Vz1','Vel1','Pressure1','Bed1','Surface1','Thickness1','Temperature1','Enthalpy1','SmbMassBalance1', ...
+	'Vx2','Vy2','Vz2','Vel2','Pressure2','Bed2','Surface2','Thickness2','Temperature2','Enthalpy2','SmbMassBalance2', ...
+	'Vx3','Vy3','Vz3','Vel3','Pressure3','Bed3','Surface3','Thickness3','Temperature3','Enthalpy3','SmbMassBalance3', ...
+	'Vx4','Vy4','Vz4','Vel4','Pressure4','Bed4','Surface4','Thickness4','Temperature4','Enthalpy4','SmbMassBalance4'};
+field_tolerances={...
+	1e-09,1e-09,1e-09,1e-09,1e-10,1e-10,1e-10,1e-10,1e-10,1e-10,1e-13,...
+	1e-09,1e-09,1e-09,1e-09,1e-10,1e-10,1e-10,1e-10,1e-10,1e-10,1e-13,...
+	1e-09,1e-09,1e-09,1e-09,1e-10,1e-10,1e-10,1e-10,1e-10,1e-10,1e-13,...
+	1e-09,1e-09,1e-09,1e-09,1e-10,1e-10,1e-10,1e-10,1e-10,1e-10,1e-13};
+field_values={...
+	(md.results.TransientSolution(1).Vx),...
+	(md.results.TransientSolution(1).Vy),...
+	(md.results.TransientSolution(1).Vz),...
+	(md.results.TransientSolution(1).Vel),...
+	(md.results.TransientSolution(1).Pressure),...
+	(md.results.TransientSolution(1).Base),...
+	(md.results.TransientSolution(1).Surface),...
+	(md.results.TransientSolution(1).Thickness),...
+	(md.results.TransientSolution(1).Temperature),...
+	(md.results.TransientSolution(1).Enthalpy),...
+	(md.results.TransientSolution(1).SmbMassBalance),...
+	(md.results.TransientSolution(2).Vx),...
+	(md.results.TransientSolution(2).Vy),...
+	(md.results.TransientSolution(2).Vz),...
+	(md.results.TransientSolution(2).Vel),...
+	(md.results.TransientSolution(2).Pressure),...
+	(md.results.TransientSolution(2).Base),...
+	(md.results.TransientSolution(2).Surface),...
+	(md.results.TransientSolution(2).Thickness),...
+	(md.results.TransientSolution(2).Temperature),...
+	(md.results.TransientSolution(2).Enthalpy),...
+	(md.results.TransientSolution(2).SmbMassBalance),...
+	(md.results.TransientSolution(3).Vx),...
+	(md.results.TransientSolution(3).Vy),...
+	(md.results.TransientSolution(3).Vz),...
+	(md.results.TransientSolution(3).Vel),...
+	(md.results.TransientSolution(3).Pressure),...
+	(md.results.TransientSolution(3).Base),...
+	(md.results.TransientSolution(3).Surface),...
+	(md.results.TransientSolution(3).Thickness),...
+	(md.results.TransientSolution(3).Temperature),...
+	(md.results.TransientSolution(3).Enthalpy),...
+	(md.results.TransientSolution(3).SmbMassBalance),...
+	(md.results.TransientSolution(4).Vx),...
+	(md.results.TransientSolution(4).Vy),...
+	(md.results.TransientSolution(4).Vz),...
+	(md.results.TransientSolution(4).Vel),...
+	(md.results.TransientSolution(4).Pressure),...
+	(md.results.TransientSolution(4).Base),...
+	(md.results.TransientSolution(4).Surface),...
+	(md.results.TransientSolution(4).Thickness),...
+	(md.results.TransientSolution(4).Temperature),...
+	(md.results.TransientSolution(4).Enthalpy),...
+	(md.results.TransientSolution(4).SmbMassBalance),...
+	};
Index: /issm/trunk/test/NightlyRun/test250.py
===================================================================
--- /issm/trunk/test/NightlyRun/test250.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test250.py	(revision 24686)
@@ -24,5 +24,5 @@
 
 smb = np.ones((md.mesh.numberofvertices, )) * 3.6
-smb = np.array([smb, smb * - 1]).T
+smb = np.array([smb, smb * -1]).T
 
 md.smb.mass_balance = smb
Index: /issm/trunk/test/NightlyRun/test251.py
===================================================================
--- /issm/trunk/test/NightlyRun/test251.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test251.py	(revision 24686)
@@ -24,5 +24,5 @@
 
 smb = np.ones((md.mesh.numberofvertices, )) * 3.6
-smb = np.array([smb, smb * - 1]).T
+smb = np.array([smb, smb * -1]).T
 
 md.smb.mass_balance = smb
Index: /issm/trunk/test/NightlyRun/test318.m
===================================================================
--- /issm/trunk/test/NightlyRun/test318.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test318.m	(revision 24686)
@@ -3,5 +3,5 @@
 md=setmask(md,'','');
 md=parameterize(md,'../Par/SquareSheetConstrained.par');
-md=extrude(md,4,1.);
+md=extrude(md,5,1.);
 md=setflowequation(md,'SIA','all');
 md.cluster=generic('name',oshostname(),'np',3);
Index: /issm/trunk/test/NightlyRun/test318.py
===================================================================
--- /issm/trunk/test/NightlyRun/test318.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test318.py	(revision 24686)
@@ -12,5 +12,5 @@
 md = setmask(md, '', '')
 md = parameterize(md, '../Par/SquareSheetConstrained.py')
-md.extrude(4, 1.)
+md.extrude(5, 1.)
 md = setflowequation(md, 'SIA', 'all')
 md.cluster = generic('name', gethostname(), 'np', 3)
Index: /issm/trunk/test/NightlyRun/test328.m
===================================================================
--- /issm/trunk/test/NightlyRun/test328.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test328.m	(revision 24686)
@@ -15,5 +15,5 @@
 %Fields and tolerances to track changes
 field_names     ={'Vx1','Vy1','Vel1','Bed1','Surface1','Thickness1','SMB1','TotalSmb1','Vx2','Vy2','Vel2','Bed2','Surface2','Thickness2','SMB2','TotalSmb2','Vx3','Vy3','Vel3','Bed3','Surface3','Thickness3','SMB3','TotalSmb3'};
-field_tolerances={1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1.5e-13,1e-13};
+field_tolerances={1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,2e-13,1e-13};
 field_values={...
 	(md.results.TransientSolution(1).Vx),...
Index: /issm/trunk/test/NightlyRun/test328.py
===================================================================
--- /issm/trunk/test/NightlyRun/test328.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test328.py	(revision 24686)
@@ -38,5 +38,5 @@
                     1e-13, 1e-13, 1e-13,
                     1e-13, 1e-13, 1e-13,
-                    1e-13, 1e-13, 1e-13]
+                    1e-13, 2e-13, 1e-13]
 field_values = [md.results.TransientSolution[0].Vx,
                 md.results.TransientSolution[0].Vy,
Index: /issm/trunk/test/NightlyRun/test330.m
===================================================================
--- /issm/trunk/test/NightlyRun/test330.m	(revision 24685)
+++ 	(revision )
@@ -1,48 +1,0 @@
-%Test Name:UnConfinedHydroDC
-md=triangle(model(),'../Exp/Strip.exp',10000.);
-md=setmask(md,'','');
-%reduced slab (20m long)
-md.mesh.x=md.mesh.x/5.0e3;
-md.mesh.y=md.mesh.y/5.0e3;
-md=parameterize(md,'../Par/IceCube.par');
-md.transient=deactivateall(md.transient);
-md.transient.ishydrology=1;
-md=setflowequation(md,'SSA','all');
-md.cluster=generic('name',oshostname(),'np',1);
-md.hydrology=(hydrologydc);
-md.hydrology=initialize(md.hydrology,md);
-
-%Hydro Model Parameters
-md.hydrology.isefficientlayer=0;
-md.hydrology.sedimentlimit_flag=0;
-md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
-md.hydrology.rel_tol=1.0e-6;
-md.hydrology.penalty_lock=0;
-md.hydrology.max_iter=200;
-md.hydrology.transfer_flag=0;
-md.hydrology.unconfined_flag=1;
-md.hydrology.sediment_porosity=0.1;
-%Sediment
-md.hydrology.sediment_thickness=10.0;
-md.hydrology.sediment_transmitivity=(1.0e-3*md.hydrology.sediment_thickness)*ones(md.mesh.numberofvertices,1);
-%init
-md.initialization.sediment_head=-5.0*ones(md.mesh.numberofvertices,1);
-%BC
-md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
-pos=find(md.mesh.x==0);
-md.hydrology.spcsediment_head(pos)=0.5;
-
-md.timestepping.time_step=5/md.constants.yts; %5s steppin
-md.settings.output_frequency=2;
-md.timestepping.final_time=300/md.constants.yts; %500s run
-
-md=solve(md,'Transient');
-
-%fields to track, results can also be found in
-%Wang 2009 Fig 6b (jouranl of Hydrology)
-field_names={'SedimentWaterHead1',...
-	     'SedimentWaterHead2'};
-field_tolerances={1e-13,...
-		  1e-13};
-field_values={md.results.TransientSolution(11).SedimentHead,...
-	      md.results.TransientSolution(31).SedimentHead};
Index: /issm/trunk/test/NightlyRun/test330.py
===================================================================
--- /issm/trunk/test/NightlyRun/test330.py	(revision 24685)
+++ 	(revision )
@@ -1,55 +1,0 @@
-#Test Name:UnConfinedHydroDC
-import numpy as np
-from model import *
-from setmask import *
-from triangle import triangle
-from transient import transient
-from parameterize import parameterize
-from setflowequation import setflowequation
-from solve import solve
-from socket import gethostname
-from generic import generic
-
-md = triangle(model(), '../Exp/Strip.exp', 10000.)
-md = setmask(md, '', '')
-#reduced slab (20m long)
-md.mesh.x = md.mesh.x / 5.0e3
-md.mesh.y = md.mesh.y / 5.0e3
-md = parameterize(md, '../Par/IceCube.py')
-md.transient = transient.setallnullparameters(md.transient)
-md.transient.ishydrology = True
-md = setflowequation(md, 'SSA', 'all')
-md.cluster = generic('name', gethostname(), 'np', 1)
-md.hydrology = hydrologydc()
-md.hydrology = md.hydrology.initialize(md)
-
-#Hydro Model Parameters
-md.hydrology.isefficientlayer = 0
-md.hydrology.sedimentlimit_flag = 0
-md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
-md.hydrology.rel_tol = 1.0e-6
-md.hydrology.penalty_lock = 0
-md.hydrology.max_iter = 200
-md.hydrology.transfer_flag = 0
-md.hydrology.unconfined_flag = 1
-md.hydrology.sediment_porosity = 0.1
-#Sediment
-md.hydrology.sediment_thickness = 10.0
-md.hydrology.sediment_transmitivity = (1.0e-3 * md.hydrology.sediment_thickness) * np.ones((md.mesh.numberofvertices))
-#init
-md.initialization.sediment_head = -5.0 * np.ones((md.mesh.numberofvertices))
-#BC
-md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
-md.hydrology.spcsediment_head[np.where(md.mesh.x == 0)] = 0.5
-
-md.timestepping.time_step = 5 / md.constants.yts  #5s steppin
-md.settings.output_frequency = 2
-md.timestepping.final_time = 300 / md.constants.yts  #500s run
-
-md = solve(md, 'Transient')
-
-#fields to track, results can also be found in
-#Wang 2009 Fig 6b (jouranl of Hydrology)
-field_names = ['SedimentWaterHead1', 'SedimentWaterHead2']
-field_tolerances = [1e-13, 1e-13]
-field_values = [md.results.TransientSolution[10].SedimentHead, md.results.TransientSolution[30].SedimentHead]
Index: /issm/trunk/test/NightlyRun/test332.m
===================================================================
--- /issm/trunk/test/NightlyRun/test332.m	(revision 24685)
+++ 	(revision )
@@ -1,32 +1,0 @@
-%Test Name: SquareSheetConstrainedHydrologyDC
-md=triangle(model(),'../Exp/Square.exp',100000.);
-md=setmask(md,'','');
-md=parameterize(md,'../Par/IceCube.par');
-md.transient=deactivateall(md.transient);
-md.transient.ishydrology=1;
-md=setflowequation(md,'SSA','all');
-md.cluster=generic('name',oshostname(),'np',1);
-md.hydrology=(hydrologydc);
-md.hydrology=initialize(md.hydrology,md);
-md.hydrology.isefficientlayer=0;
-md.hydrology.sedimentlimit_flag=1;
-md.hydrology.sedimentlimit=8000.0;
-md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
-md.initialization.sediment_head=0.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
-pos=find(md.mesh.y==0);
-md.hydrology.spcsediment_head(pos)=0.0;
-md.basalforcings.groundedice_melting_rate = 2.0*ones(md.mesh.numberofvertices,1);
-md.basalforcings.floatingice_melting_rate = 0.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.sediment_transmitivity=3*ones(md.mesh.numberofvertices,1);
-md.timestepping.time_step=0;
-md.timestepping.final_time=1.0;
-md=solve(md,'Hydrology');
-
-%Fields and tolerances to track changes
-%you can also compare with an analitic solution, but it is exact
-%only if no limits are applied
-%analitic=(md.mesh.y.^2-2*md.mesh.y*1.0e6)*(-2.0/(2*md.constants.yts*md.hydrology.sediment_transmitivity))
-field_names     ={'SedimentWaterHead','SedimentHeadResidual'};
-field_tolerances={1e-13, 3e-10};
-field_values={md.results.HydrologySolution.SedimentHead,md.results.HydrologySolution.SedimentHeadResidual};
Index: /issm/trunk/test/NightlyRun/test332.py
===================================================================
--- /issm/trunk/test/NightlyRun/test332.py	(revision 24685)
+++ 	(revision )
@@ -1,48 +1,0 @@
-#Test Name: SquareSheetConstrainedHydrologyDC
-import numpy as np
-from model import *
-from socket import gethostname
-from triangle import *
-from setmask import *
-from parameterize import *
-from transient import *
-from setflowequation import *
-from solve import *
-
-
-from generic import generic
-
-md = triangle(model(), '../Exp/Square.exp', 100000.)
-md = setmask(md, '', '')
-md = parameterize(md, '../Par/IceCube.py')
-
-md.transient = transient.setallnullparameters(md.transient)
-md.transient.ishydrology = True
-
-md = setflowequation(md, 'SSA', 'all')
-md.cluster = generic('name', gethostname(), 'np', 1)
-md.hydrology = hydrologydc()
-md.hydrology = md.hydrology.initialize(md)
-
-md.hydrology.isefficientlayer = 0
-md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
-md.hydrology.sedimentlimit_flag = 1
-md.hydrology.sedimentlimit = 8000.0
-md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
-md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
-pos = np.nonzero(md.mesh.y == 0.)[0]
-md.hydrology.spcsediment_head[pos] = 0.0
-md.basalforcings.groundedice_melting_rate = 2.0 * np.ones((md.mesh.numberofvertices))
-md.basalforcings.floatingice_melting_rate = 0.0 * np.ones((md.mesh.numberofvertices))
-md.hydrology.sediment_transmitivity = 3.0 * np.ones((md.mesh.numberofvertices))
-md.timestepping.time_step = 0
-md.timestepping.final_time = 1.0
-md = solve(md, 'Hydrology')
-
-#Fields and tolerances to track changes
-#you can also compare with an analitic solution, but it is exact
-#only if no limits are applied
-#analitic=(md.mesh.y**2 - 2 * md.mesh.y * 1.0e6) * (-2.0 / (2 * md.constants.yts * md.hydrology.sediment_transmitivity))
-field_names = ['SedimentWaterHead', 'SedimentHeadResidual']
-field_tolerances = [1e-13, 3e-10]
-field_values = [md.results.HydrologySolution.SedimentHead, md.results.HydrologySolution.SedimentHeadResidual]
Index: /issm/trunk/test/NightlyRun/test333.m
===================================================================
--- /issm/trunk/test/NightlyRun/test333.m	(revision 24685)
+++ 	(revision )
@@ -1,82 +1,0 @@
-%Test Name: SquareSheetHydrologyDCTwoLayers
-md=triangle(model(),'../Exp/Square.exp',100000.);
-md=setmask(md,'','');
-md=parameterize(md,'../Par/IceCube.par');
-md.transient=deactivateall(md.transient);
-md.transient.ishydrology=1;
-md.transient.issmb=1;
-md=setflowequation(md,'SSA','all');
-md.cluster=generic('name',oshostname(),'np',1);
-md.hydrology=(hydrologydc);
-md.hydrology=initialize(md.hydrology,md);
-md.hydrology.isefficientlayer=1;
-md.hydrology.sedimentlimit_flag=1;
-md.hydrology.sedimentlimit=800.0;
-md.hydrology.transfer_flag = 0;
-md.initialization.sediment_head=0.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
-md.basalforcings.groundedice_melting_rate = 2.0*ones(md.mesh.numberofvertices,1);
-md.basalforcings.floatingice_melting_rate = 0.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.sediment_transmitivity=3*ones(md.mesh.numberofvertices,1);
-md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
-
-md.initialization.epl_head=0.0*ones(md.mesh.numberofvertices,1);
-md.initialization.epl_thickness=1.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.spcepl_head=NaN*ones(md.mesh.numberofvertices,1);
-md.hydrology.mask_eplactive_node=0*ones(md.mesh.numberofvertices,1);
-md.hydrology.epl_conductivity=30;
-md.hydrology.epl_initial_thickness=1;
-md.hydrology.epl_thick_comp=1;
-md.hydrology.epl_colapse_thickness=1.0e-3;
-md.hydrology.epl_max_thickness=1;
-md.hydrology.steps_per_step=10;
-md.timestepping.time_step=2.0;
-md.timestepping.final_time=2.0;
-
-%md.verbose.solution=1;
-
-md=solve(md,'Transient');
-%re-run with no substeps
-mdfine=md;
-mdfine.results=struct();
-mdfine.hydrology.steps_per_step=1;
-mdfine.timestepping.time_step=0.2;
-mdfine=solve(mdfine,'Transient');
-
-%store=md.constants.g*md.hydrology.sediment_porosity*md.materials.rho_freshwater*((md.hydrology.sediment_compressibility/md.hydrology.sediment_porosity)+md.hydrology.water_compressibility);
-%sedstore=20.0*store;
-%for i=1:10
-%diff=(mean(md.results.HydrologySolution(i).EplHead)*store+ ...
-%			mean(md.results.HydrologySolution(i).SedimentHead)*sedstore-0.4*i)
-%end
-%Fields and tolerances to track changes
-field_names     ={'SedimentWaterHead1','EplWaterHead1','SedimentHeadResidual1',...
-                  'SedimentWaterHead4','EplWaterHead4','SedimentHeadResidual4',...
-                  'SedimentWaterHead5','EplWaterHead5','SedimentHeadResidual5',...
-                  'SedimentWaterHead9','EplWaterHead9','SedimentHeadResidual9',...
-                  'EplWaterHead10', 'EplWaterHeadSubstep10', 'SedimentWaterHead10',...
-		  'SedimentWaterHeadSubstep10'};
-field_tolerances={...
-    1e-13, 1e-13, 1e-13,...
-    1e-13, 1e-13, 1e-13,...
-    1e-13, 5e-12, 1e-11,...
-    1e-13, 5e-12, 1e-11,...
-    1e-13, 1e-13, 1e-13,...
-    1e-13};
-field_values={mdfine.results.TransientSolution(1).SedimentHead, ...
-	      mdfine.results.TransientSolution(1).EplHead,...
-	      mdfine.results.TransientSolution(1).SedimentHeadResidual,...
-	      mdfine.results.TransientSolution(4).SedimentHead,...
-	      mdfine.results.TransientSolution(4).EplHead,...
-	      mdfine.results.TransientSolution(4).SedimentHeadResidual, ...
-	      mdfine.results.TransientSolution(5).SedimentHead,...
-	      mdfine.results.TransientSolution(5).EplHead,...
-	      mdfine.results.TransientSolution(5).SedimentHeadResidual, ...
-	      mdfine.results.TransientSolution(9).SedimentHead,...
-	      mdfine.results.TransientSolution(9).EplHead,...
-	      mdfine.results.TransientSolution(9).SedimentHeadResidual,...
-              md.results.TransientSolution(1).EplHead,...
-              md.results.TransientSolution(1).EplHeadSubstep,...
-              md.results.TransientSolution(1).SedimentHead,...
-              md.results.TransientSolution(1).SedimentHeadSubstep
-              };
Index: /issm/trunk/test/NightlyRun/test333.py
===================================================================
--- /issm/trunk/test/NightlyRun/test333.py	(revision 24685)
+++ 	(revision )
@@ -1,88 +1,0 @@
-#Test Name: SquareSheetHydrologyDCTwoLayers
-import numpy as np
-from model import *
-from socket import gethostname
-from triangle import *
-from setmask import *
-from parameterize import *
-from transient import *
-from setflowequation import *
-from solve import *
-from generic import generic
-
-
-md = triangle(model(), '../Exp/Square.exp', 100000.)
-md = setmask(md, '', '')
-md = parameterize(md, '../Par/IceCube.py')
-
-md.transient = transient.setallnullparameters(md.transient)
-md.transient.ishydrology = True
-#md.transient.issmb = True
-md = setflowequation(md, 'SSA', 'all')
-md.cluster = generic('name', gethostname(), 'np', 1)
-md.hydrology = hydrologydc()
-md.hydrology = md.hydrology.initialize(md)
-
-md.hydrology.isefficientlayer = 1
-md.hydrology.sedimentlimit_flag = 1
-md.hydrology.sedimentlimit = 800.0
-md.hydrology.transfer_flag = 0
-md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
-md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
-md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
-
-md.basalforcings.groundedice_melting_rate = 2.0 * np.ones((md.mesh.numberofvertices))
-md.basalforcings.floatingice_melting_rate = 0.0 * np.ones((md.mesh.numberofvertices))
-md.hydrology.sediment_transmitivity = 3.0 * np.ones((md.mesh.numberofvertices))
-
-md.initialization.epl_head = np.zeros((md.mesh.numberofvertices))
-md.initialization.epl_thickness = np.ones((md.mesh.numberofvertices))
-md.hydrology.spcepl_head = np.nan * np.ones((md.mesh.numberofvertices))
-md.hydrology.mask_eplactive_node = np.zeros((md.mesh.numberofvertices))
-md.hydrology.epl_conductivity = 30
-md.hydrology.epl_initial_thickness = 1
-md.hydrology.epl_colapse_thickness = 1.0e-3
-md.hydrology.epl_thick_comp = 1
-md.hydrology.epl_max_thickness = 1
-md.hydrology.steps_per_step = 10
-md.timestepping.time_step = 2.0
-md.timestepping.final_time = 2.0
-
-#md.debug.valgrind = True
-md = solve(md, 'Transient')
-
-#re-run with no substeps
-mdfine = copy.deepcopy(md)
-mdfine.results = []
-mdfine.hydrology.steps_per_step = 1
-mdfine.timestepping.time_step = 0.2
-mdfine = solve(mdfine, 'Transient')
-
-field_names = ['SedimentWaterHead1', 'EplWaterHead1', 'SedimentHeadResidual1',
-               'SedimentWaterHead4', 'EplWaterHead4', 'SedimentHeadResidual4',
-               'SedimentWaterHead5', 'EplWaterHead5', 'SedimentHeadResidual5',
-               'SedimentWaterHead9', 'EplWaterHead9', 'SedimentHeadResidual9',
-               'EplWaterHead10', 'EplWaterHeadSubstep10', 'SedimentWaterHead10',
-               'SedimentWaterHeadSubstep10']
-field_tolerances = [1e-13, 1e-13, 1e-13,
-                    1e-13, 1e-13, 1e-13,
-                    1e-13, 5e-12, 1e-11,
-                    1e-13, 5e-12, 1e-11,
-                    1e-13, 1e-13, 1e-13,
-                    1e-13]
-field_values = [mdfine.results.TransientSolution[0].SedimentHead,
-                mdfine.results.TransientSolution[0].EplHead,
-                mdfine.results.TransientSolution[0].SedimentHeadResidual,
-                mdfine.results.TransientSolution[3].SedimentHead,
-                mdfine.results.TransientSolution[3].EplHead,
-                mdfine.results.TransientSolution[3].SedimentHeadResidual,
-                mdfine.results.TransientSolution[4].SedimentHead,
-                mdfine.results.TransientSolution[4].EplHead,
-                mdfine.results.TransientSolution[4].SedimentHeadResidual,
-                mdfine.results.TransientSolution[8].SedimentHead,
-                mdfine.results.TransientSolution[8].EplHead,
-                mdfine.results.TransientSolution[8].SedimentHeadResidual,
-                md.results.TransientSolution[-1].EplHead,
-                md.results.TransientSolution[-1].EplHeadSubstep,
-                md.results.TransientSolution[-1].SedimentHead,
-                md.results.TransientSolution[-1].SedimentHeadSubstep]
Index: /issm/trunk/test/NightlyRun/test334.m
===================================================================
--- /issm/trunk/test/NightlyRun/test334.m	(revision 24685)
+++ 	(revision )
@@ -1,34 +1,0 @@
-%Test Name: SquareSheetConstrainedExtrudedHydrologyDC
-md=triangle(model(),'../Exp/Square.exp',100000.);
-md=setmask(md,'','');
-md.transient=deactivateall(md.transient);
-md.transient.ishydrology=1;
-md=parameterize(md,'../Par/IceCube.par');
-md=setflowequation(md,'SSA','all');
-md.cluster=generic('name',oshostname(),'np',1);
-md.hydrology=(hydrologydc);
-md.hydrology=initialize(md.hydrology,md);
-md.hydrology.isefficientlayer=0;
-md.hydrology.sedimentlimit_flag=1;
-md.hydrology.sedimentlimit=8000.0;
-md.initialization.sediment_head=0.0*ones(md.mesh.numberofvertices, ...
-                                         1);
-md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
-md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
-pos=find(md.mesh.y==0);
-md.hydrology.spcsediment_head(pos)=0.0;
-md.basalforcings.groundedice_melting_rate = 2.0*ones(md.mesh.numberofvertices,1);
-md.basalforcings.floatingice_melting_rate = 0.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.sediment_transmitivity= 3.0*ones(md.mesh.numberofvertices,1);
-md.timestepping.time_step=0;
-md.timestepping.final_time=1.0;
-md=extrude(md,3,1.1);
-md=solve(md,'Hydrology');
-
-%Fields and tolerances to track changes
-%you can also compare with an analitic solution, but it is exact
-%only if no limits are applied
-%analitic=(md.mesh.y.^2-2*md.mesh.y*1.0e6)*(-2.0/(2*md.constants.yts*md.hydrology.sediment_transmitivity))
-field_names     ={'SedimentWaterHead','SedimentHeadResidual'};
-field_tolerances={1e-13, 3e-10};
-field_values={md.results.HydrologySolution.SedimentHead,md.results.HydrologySolution.SedimentHeadResidual};
Index: /issm/trunk/test/NightlyRun/test334.py
===================================================================
--- /issm/trunk/test/NightlyRun/test334.py	(revision 24685)
+++ 	(revision )
@@ -1,48 +1,0 @@
-#Test Name: SquareSheetConstrainedExtrudedHydrologyDC
-import numpy as np
-from model import *
-from socket import gethostname
-from triangle import *
-from setmask import *
-from parameterize import *
-from transient import *
-from setflowequation import *
-from solve import *
-
-
-from generic import generic
-
-md = triangle(model(), '../Exp/Square.exp', 100000.)
-md = setmask(md, '', '')
-md = parameterize(md, '../Par/IceCube.py')
-md.transient = transient.setallnullparameters(md.transient)
-md.transient.ishydrology = True
-md = setflowequation(md, 'SSA', 'all')
-md.cluster = generic('name', gethostname(), 'np', 1)
-md.hydrology = hydrologydc()
-md.hydrology = md.hydrology.initialize(md)
-
-md.hydrology.isefficientlayer = 0
-md.hydrology.sedimentlimit_flag = 1
-md.hydrology.sedimentlimit = 8000.0
-md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
-md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
-md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
-md.hydrology.spcsediment_head[np.where(md.mesh.y == 0)] = 0.0
-
-md.basalforcings.groundedice_melting_rate = 2.0 * np.ones((md.mesh.numberofvertices))
-md.basalforcings.floatingice_melting_rate = 0.0 * np.ones((md.mesh.numberofvertices))
-md.hydrology.sediment_transmitivity = 3.0 * np.ones((md.mesh.numberofvertices))
-
-md.timestepping.time_step = 0
-md.timestepping.final_time = 1.0
-md.extrude(3, 1.)
-md = solve(md, 'Hydrology')
-
-#Fields and tolerances to track changes
-#you can also compare with an analitic solution, but it is exact
-#only if no limits are applied
-#analitic=(md.mesh.y.^2 - 2 * md.mesh.y * 1.0e6) * (-2.0 / (2 * md.constants.yts * md.hydrology.sediment_transmitivity))
-field_names = ['SedimentWaterHead', 'SedimentHeadResidual']
-field_tolerances = [1e-13, 3e-10]
-field_values = [md.results.HydrologySolution.SedimentHead, md.results.HydrologySolution.SedimentHeadResidual]
Index: /issm/trunk/test/NightlyRun/test335.m
===================================================================
--- /issm/trunk/test/NightlyRun/test335.m	(revision 24685)
+++ 	(revision )
@@ -1,73 +1,0 @@
-%Test Name: SquareSheetExtrudedHydrologyDCTwoLayers
-md=triangle(model(),'../Exp/Square.exp',100000.);
-md=setmask(md,'','');
-md.transient=deactivateall(md.transient);
-md.transient.ishydrology=1;
-md.transient.issmb=1;
-md=parameterize(md,'../Par/IceCube.par');
-md=setflowequation(md,'SSA','all');
-md.cluster=generic('name',oshostname(),'np',1);
-md.hydrology=(hydrologydc);
-md.hydrology=initialize(md.hydrology,md);
-md.hydrology.isefficientlayer=1;
-md.hydrology.sedimentlimit_flag=1;
-md.hydrology.transfer_flag = 0;
-md.hydrology.sedimentlimit=800.0;
-md.initialization.sediment_head=0.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
-md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
-md.basalforcings.groundedice_melting_rate = 2.0*ones(md.mesh.numberofvertices,1);
-md.basalforcings.floatingice_melting_rate = 0.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.sediment_transmitivity=3*ones(md.mesh.numberofvertices,1);
-
-md.initialization.epl_head=0.0*ones(md.mesh.numberofvertices,1);
-md.initialization.epl_thickness=1.0*ones(md.mesh.numberofvertices,1);
-md.hydrology.spcepl_head=NaN*ones(md.mesh.numberofvertices,1);
-md.hydrology.mask_eplactive_node=0*ones(md.mesh.numberofvertices,1);
-md.hydrology.epl_conductivity=30;
-md.hydrology.epl_initial_thickness=1;
-md.hydrology.epl_colapse_thickness=1.0e-3;
-md.hydrology.epl_thick_comp=1;
-md.hydrology.epl_max_thickness=1;
-md.timestepping.time_step=0.2;
-md.timestepping.final_time=2.0;
-
-%md.verbose.solution=1;
-md=extrude(md,3,1.1);
-md=solve(md,'Transient');
-
-%store=md.constants.g*md.hydrology.sediment_porosity* ...
-%			md.materials.rho_freshwater*((md.hydrology.sediment_compressibility/md.hydrology.sediment_porosity)+md.hydrology.water_compressibility)
-
-%sed=ones(1,size(md.results.HydrologySolution,2));
-%epl=ones(1,size(md.results.HydrologySolution,2));
-%res=ones(1,size(md.results.HydrologySolution,2));
-%input=ones(1,size(md.results.HydrologySolution,2));
-%for i= 1:size(md.results.HydrologySolution,2)
-%	sed(i)=mean(md.results.HydrologySolution(i).SedimentHead);
-%	res(i)=mean(md.results.HydrologySolution(i).SedimentHeadResidual);
-%	epl(i)=mean(md.results.HydrologySolution(i).EplHead);
-%	input(i)=2.0*(i*0.2);
-%end
-
-%Fields and tolerances to track changes
-field_names     ={'SedimentWaterHead1','EplWaterHead1','SedimentHeadResidual1',...
-		  'SedimentWaterHead4','EplWaterHead4','SedimentHeadResidual4',...
-		  'SedimentWaterHead5','EplWaterHead5','SedimentHeadResidual5',...
-		  'SedimentWaterHead9','EplWaterHead9','SedimentHeadResidual9'};
-field_tolerances={1e-13, 1e-13, 1e-13,...
-		  1e-13, 1e-13, 1e-13,...
-		  1e-13, 5e-12, 2e-11,...
-		  1e-13, 5e-12, 2e-11};
-field_values={md.results.TransientSolution(1).SedimentHead, ...
-	      md.results.TransientSolution(1).EplHead,...
-	      md.results.TransientSolution(1).SedimentHeadResidual,...
-	      md.results.TransientSolution(4).SedimentHead,...
-	      md.results.TransientSolution(4).EplHead,...
-	      md.results.TransientSolution(4).SedimentHeadResidual, ...
-	      md.results.TransientSolution(5).SedimentHead,...
-	      md.results.TransientSolution(5).EplHead,...
-	      md.results.TransientSolution(5).SedimentHeadResidual, ...
-	      md.results.TransientSolution(9).SedimentHead,...
-	      md.results.TransientSolution(9).EplHead,...
-	      md.results.TransientSolution(9).SedimentHeadResidual};
Index: /issm/trunk/test/NightlyRun/test335.py
===================================================================
--- /issm/trunk/test/NightlyRun/test335.py	(revision 24685)
+++ 	(revision )
@@ -1,72 +1,0 @@
-#Test Name: SquareSheetExtrudedHydrologyDCTwoLayers
-import numpy as np
-from model import *
-from socket import gethostname
-from triangle import *
-from setmask import *
-from parameterize import *
-from transient import *
-from setflowequation import *
-from solve import *
-
-
-from generic import generic
-
-md = triangle(model(), '../Exp/Square.exp', 100000.)
-md = setmask(md, '', '')
-md = parameterize(md, '../Par/IceCube.py')
-md.transient = transient.setallnullparameters(md.transient)
-md.transient.ishydrology = True
-md.transient.issmb = True
-md = setflowequation(md, 'SSA', 'all')
-md.cluster = generic('name', gethostname(), 'np', 1)
-md.hydrology = hydrologydc()
-md.hydrology = md.hydrology.initialize(md)
-
-md.hydrology.isefficientlayer = 1
-md.hydrology.sedimentlimit_flag = 1
-md.hydrology.transfer_flag = 0
-md.hydrology.sedimentlimit = 800.0
-md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
-md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
-md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
-md.basalforcings.groundedice_melting_rate = 2.0 * np.ones((md.mesh.numberofvertices))
-md.basalforcings.floatingice_melting_rate = np.zeros((md.mesh.numberofvertices))
-md.hydrology.sediment_transmitivity = 3 * np.ones((md.mesh.numberofvertices))
-
-md.initialization.epl_head = np.zeros((md.mesh.numberofvertices))
-md.initialization.epl_thickness = np.ones((md.mesh.numberofvertices))
-md.hydrology.spcepl_head = np.nan * np.ones((md.mesh.numberofvertices))
-md.hydrology.mask_eplactive_node = np.zeros((md.mesh.numberofvertices))
-md.hydrology.epl_conductivity = 30
-md.hydrology.epl_initial_thickness = 1
-md.hydrology.epl_colapse_thickness = 1.0e-3
-md.hydrology.epl_thick_comp = 1
-md.hydrology.epl_max_thickness = 1
-md.timestepping.time_step = 0.2
-md.timestepping.final_time = 2.0
-
-md.extrude(3, 1.)
-md = solve(md, 'Transient')
-
-#Fields and tolerances to track changes
-field_names = ['SedimentWaterHead1', 'EplWaterHead1', 'SedimentHeadResidual1',
-               'SedimentWaterHead4', 'EplWaterHead4', 'SedimentHeadResidual4',
-               'SedimentWaterHead5', 'EplWaterHead5', 'SedimentHeadResidual5',
-               'SedimentWaterHead9', 'EplWaterHead9', 'SedimentHeadResidual9']
-field_tolerances = [1e-13, 1e-13, 1e-13,
-                    1e-13, 1e-13, 1e-13,
-                    1e-13, 5e-12, 2e-11,
-                    1e-13, 5e-12, 2e-11]
-field_values = [md.results.TransientSolution[0].SedimentHead,
-                md.results.TransientSolution[0].EplHead,
-                md.results.TransientSolution[0].SedimentHeadResidual,
-                md.results.TransientSolution[3].SedimentHead,
-                md.results.TransientSolution[3].EplHead,
-                md.results.TransientSolution[3].SedimentHeadResidual,
-                md.results.TransientSolution[4].SedimentHead,
-                md.results.TransientSolution[4].EplHead,
-                md.results.TransientSolution[4].SedimentHeadResidual,
-                md.results.TransientSolution[8].SedimentHead,
-                md.results.TransientSolution[8].EplHead,
-                md.results.TransientSolution[8].SedimentHeadResidual]
Index: /issm/trunk/test/NightlyRun/test336.py
===================================================================
--- /issm/trunk/test/NightlyRun/test336.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test336.py	(revision 24686)
@@ -22,5 +22,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb = SMBcomponents()
Index: /issm/trunk/test/NightlyRun/test337.py
===================================================================
--- /issm/trunk/test/NightlyRun/test337.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test337.py	(revision 24686)
@@ -23,5 +23,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb = SMBcomponents()
Index: /issm/trunk/test/NightlyRun/test338.py
===================================================================
--- /issm/trunk/test/NightlyRun/test338.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test338.py	(revision 24686)
@@ -22,5 +22,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb = SMBmeltcomponents()
Index: /issm/trunk/test/NightlyRun/test339.py
===================================================================
--- /issm/trunk/test/NightlyRun/test339.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test339.py	(revision 24686)
@@ -23,5 +23,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb = SMBmeltcomponents()
Index: /issm/trunk/test/NightlyRun/test352.py
===================================================================
--- /issm/trunk/test/NightlyRun/test352.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test352.py	(revision 24686)
@@ -22,5 +22,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 md.smb.mass_balance = np.vstack((smb, [1.5, 3.]))
 md.transient.isthermal = False
Index: /issm/trunk/test/NightlyRun/test353.py
===================================================================
--- /issm/trunk/test/NightlyRun/test353.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test353.py	(revision 24686)
@@ -22,5 +22,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb = SMBcomponents()
Index: /issm/trunk/test/NightlyRun/test354.py
===================================================================
--- /issm/trunk/test/NightlyRun/test354.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test354.py	(revision 24686)
@@ -22,5 +22,5 @@
 #Set up transient
 smb = np.ones((md.mesh.numberofvertices)) * 3.6
-smb = np.vstack((smb, smb * - 1.)).T
+smb = np.vstack((smb, smb * -1.)).T
 
 md.smb = SMBmeltcomponents()
Index: /issm/trunk/test/NightlyRun/test4001.m
===================================================================
--- /issm/trunk/test/NightlyRun/test4001.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test4001.m	(revision 24686)
@@ -6,68 +6,69 @@
 final_time=1;
 
-!rm -rf $ISSM_DIR/test/MITgcm/install
-!rm -rf $ISSM_DIR/test/MITgcm/build/*
-!rm $ISSM_DIR/test/MITgcm/code/SIZE.h
+!rm -rf ${ISSM_DIR}/test/MITgcm/install
+!rm -rf ${ISSM_DIR}/test/MITgcm/build/*
+!rm -f ${ISSM_DIR}/test/MITgcm/code/SIZE.h
+!rm -rf Models
 
 %Organizer
-mkdir Models
-org=organizer('repository','Models/','prefix','IceOcean.','steps',steps);
-
-presentdirectory=pwd; 
-
-% {{{ Parameters: 
+!mkdir Models
+org=organizer('repository','Models','prefix','IceOcean.','steps',steps);
+
+presentdirectory=pwd;
+
+% {{{ Parameters:
 if perform(org,'Parameters'),
-	Nx=20; %number of longitude cells
-	Ny=40; %number of latitude cells
-	Nz=30; %number of MITgcm vertical cells
-	nPx=2; %number of MITgcm processes to use in x direction
-	nPy=4; %number of MITgcm processes to use in y direction
-	xgOrigin=0; %origin of longitude
-	ygOrigin=-80; %origin of latitude
-	dLong=.25; %longitude grid spacing
-	dLat=.05; %latitude grid spacing
-	delZ=30; %thickness of vertical levels
-	icefront_position_ratio=.75; 
-	ice_thickness=100;
-	rho_ice=917;
-	rho_water=1028.14;
-	di=rho_ice/rho_water;
-
-	% MITgcm initial and lateral boundary conditions
-	iniSalt  = 34.4; % initial salinity (PSU)
-	iniTheta = -1.9; % initial potential temperature (deg C)
-	obcSalt  = 34.4; % open boundary salinity (PSU)
-	obcTheta =  1.0; % open boundary potential temperature (deg C)
-	mlDepth  = 120.; % mixed layer depth (m)
-	mlSalt   = 33.4; % open boundary salinity (PSU)
-	mlTheta  = -1.9; % open boundary potential temperature (deg C)
-	obcUvel  = -0.1; % open boundary velocity (m/s)        
-
-	MITgcmDeltaT=600; % MITgcm time step in seconds
-	y2s=31536000; % year to seconds conversion, i.e., seconds per year
-
-	% start_time, final_time, and time_step
-	start_time=0; % in decimal years
-	time_step=1/12; % coupling interval in decimal years
-	async_step_MITgcm_multiplier=1/30; % used to reduce run time for MItgcm
-
-	% bedrock/bathymetry
-	hmax=1000;
-	trough_depth=200;
-	deltah=300;
-	sea_level=1095;
-
-	% issm settings:
-	numlayers=10;
-
-	savedata(org, Nx, Ny, nPx, nPy, Nz, dLong, dLat, delZ, xgOrigin, ...
-		ygOrigin, icefront_position_ratio, ice_thickness, rho_ice, ...
-		rho_water, di, hmax, trough_depth, deltah, sea_level, ...
-		iniSalt, iniTheta, obcSalt, obcTheta, mlDepth, mlSalt, ...
-		mlTheta, obcUvel, start_time, time_step, MITgcmDeltaT, y2s,...
-		numlayers,async_step_MITgcm_multiplier);
-end
-% }}}
-% {{{ Bathymetry: 
+    Nx=20; %number of longitude cells
+    Ny=40; %number of latitude cells
+    Nz=30; %number of MITgcm vertical cells
+    nPx=2; %number of MITgcm processes to use in x direction
+    nPy=4; %number of MITgcm processes to use in y direction
+    xgOrigin=0; %origin of longitude
+    ygOrigin=-80; %origin of latitude
+    dLong=.25; %longitude grid spacing
+    dLat=.05; %latitude grid spacing
+    delZ=30; %thickness of vertical levels
+    icefront_position_ratio=.75;
+    ice_thickness=100;
+    rho_ice=917;
+    rho_water=1028.14;
+    di=rho_ice/rho_water;
+
+    % MITgcm initial and lateral boundary conditions
+    iniSalt  = 34.4; % initial salinity (PSU)
+    iniTheta = -1.9; % initial potential temperature (deg C)
+    obcSalt  = 34.4; % open boundary salinity (PSU)
+    obcTheta =  1.0; % open boundary potential temperature (deg C)
+    mlDepth  = 120.; % mixed layer depth (m)
+    mlSalt   = 33.4; % open boundary salinity (PSU)
+    mlTheta  = -1.9; % open boundary potential temperature (deg C)
+    obcUvel  = -0.1; % open boundary velocity (m/s)
+
+    MITgcmDeltaT=600; % MITgcm time step in seconds
+    y2s=31536000; % year to seconds conversion, i.e., seconds per year
+
+    % start_time, final_time, and time_step
+    start_time=0; % in decimal years
+    time_step=1/12; % coupling interval in decimal years
+    async_step_MITgcm_multiplier=1/30; % used to reduce run time for MItgcm
+
+    % bedrock/bathymetry
+    hmax=1000;
+    trough_depth=200;
+    deltah=300;
+    sea_level=1095;
+
+    % issm settings:
+    numlayers=10;
+
+    savedata(org, Nx, Ny, nPx, nPy, Nz, dLong, dLat, delZ, xgOrigin, ...
+        ygOrigin, icefront_position_ratio, ice_thickness, rho_ice, ...
+        rho_water, di, hmax, trough_depth, deltah, sea_level, ...
+        iniSalt, iniTheta, obcSalt, obcTheta, mlDepth, mlSalt, ...
+        mlTheta, obcUvel, start_time, time_step, MITgcmDeltaT, y2s,...
+        numlayers,async_step_MITgcm_multiplier);
+end
+% }}}
+% {{{ Bathymetry:
 if perform(org,'Bathymetry'),
 
@@ -94,47 +95,47 @@
 end
 % }}}
-% {{{ IceSheetGeometry: 
+% {{{ IceSheetGeometry:
 if perform(org,'IceSheetGeometry'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'Bathymetry');
-	latmin=min(lat(:));
-	latmax=max(lat(:));
-
-	%put ice_thickness constant layer of ice over the bathymetry, unless it floats: 
-	s=size(bathymetry);
-	thickness=ice_thickness*ones(s);
-
-	%figure out ice shelf: 
-	pos=find(-di*thickness>bathymetry);
-	iceshelf_mask=zeros(s);
-	iceshelf_mask(pos)=1;
-
-	ice_mask=ones(s);
-	pos=find((lat-latmin)/(latmax-latmin)>(icefront_position_ratio));
-	ice_mask(pos)=0;
-	iceshelf_mask(pos)=0;
-
-	%compute draft of ice shelf: 
-	draft=bathymetry;
-	pos=find(iceshelf_mask);
-	draft(pos)=-di*thickness(pos);
-	pos=find(~ice_mask); 
-	draft(pos)=0;
-
-	savedata(org,ice_mask,iceshelf_mask,draft,thickness);
+
+    loaddata(org,'Parameters');
+    loaddata(org,'Bathymetry');
+    latmin=min(lat(:));
+    latmax=max(lat(:));
+
+    %put ice_thickness constant layer of ice over the bathymetry, unless it floats:
+    s=size(bathymetry);
+    thickness=ice_thickness*ones(s);
+
+    %figure out ice shelf:
+    pos=find(-di*thickness>bathymetry);
+    iceshelf_mask=zeros(s);
+    iceshelf_mask(pos)=1;
+
+    ice_mask=ones(s);
+    pos=find((lat-latmin)/(latmax-latmin)>(icefront_position_ratio));
+    ice_mask(pos)=0;
+    iceshelf_mask(pos)=0;
+
+    %compute draft of ice shelf:
+    draft=bathymetry;
+    pos=find(iceshelf_mask);
+    draft(pos)=-di*thickness(pos);
+    pos=find(~ice_mask);
+    draft(pos)=0;
+
+    savedata(org,ice_mask,iceshelf_mask,draft,thickness);
 end
 % }}}
 
 %Configure MITgcm
-% {{{ GetMITgcm: 
+% {{{ GetMITgcm:
 if perform(org,'GetMITgcm'),
   system([pwd '/../MITgcm/get_mitgcm.sh']);
 end
 % }}}
-% {{{ BuildMITgcm: 
+% {{{ BuildMITgcm:
 if perform(org,'BuildMITgcm'),
 
-    %load data: 
+    %load data:
     loaddata(org,'Parameters');
 
@@ -148,5 +149,5 @@
             tline = fgetl(fidi);
             if ~ischar(tline), break, end
-            %do the change here: 
+            %do the change here:
             if strcmpi(tline,'     &           sNx =  20,'),
                 fprintf(fido,'%s%i%s\n','     &           sNx =  ',round(Nx/nPx),',');
@@ -176,15 +177,15 @@
 % }}}
 addpath(recursivepath([pwd '/../MITgcm']));
-% {{{ RunUncoupledMITgcm: 
+% {{{ RunUncoupledMITgcm:
 if perform(org,'RunUncoupledMITgcm'),
 
-    %load data: 
+    %load data:
     loaddata(org,'Parameters');
     loaddata(org,'Bathymetry');
     loaddata(org,'IceSheetGeometry');
-	 endtime = round(MITgcmDeltaT * ...
-		 floor(time_step*y2s*async_step_MITgcm_multiplier/MITgcmDeltaT));
-
-    % {{{ prepare MITgcm 
+     endtime = round(MITgcmDeltaT * ...
+         floor(time_step*y2s*async_step_MITgcm_multiplier/MITgcmDeltaT));
+
+    % {{{ prepare MITgcm
     % rename previous run directory and create new one
     if exist ('run.old')
@@ -198,6 +199,6 @@
     !\cp ../MITgcm/input/* run
     !\cp ../MITgcm/input/eedata_uncoupled run/eedata
-    
-    %load data: 
+
+    %load data:
     loaddata(org,'Parameters');
 
@@ -205,14 +206,14 @@
     S=iniSalt*ones(Nx,Ny,Nz);
     writebin('run/Salt.bin',S);
-    
+
     % initial temperature
     T=iniTheta*ones(Nx,Ny,Nz);
     writebin('run/Theta.bin',T);
-    
+
     % initial velocity
     Z=zeros(Nx,Ny,Nz);
     writebin('run/Uvel.bin',Z);
     writebin('run/Vvel.bin',Z);
-    
+
     % initial sea surface height
     Z=zeros(Nx,Ny);
@@ -235,5 +236,5 @@
     U=obcUvel*ones(Ny,Nz);
     writebin('run/OBu.bin',U);
-    
+
     % zero boundary conditions
     Z=zeros(Ny,Nz);
@@ -248,5 +249,5 @@
         tline = fgetl(fidi);
         if ~ischar(tline), break, end
-        %do the change here: 
+        %do the change here:
         if strcmpi(tline,' OB_Iwest = 40*1,'),
             fprintf(fido,'%s%i%s\n',' OB_Iwest = ',Ny,'*1,');
@@ -268,8 +269,8 @@
     % }}}
 
-    %start looping:  
+    %start looping:
     for t=start_time:time_step:final_time,
         disp(['Year: ' num2str(t)])
-        % {{{ generate MITgcm parameter file data 
+        % {{{ generate MITgcm parameter file data
         fidi=fopen('../MITgcm/input/data','r');
         fido=fopen('run/data','w');
@@ -279,5 +280,5 @@
             tline = fgetl(fidi);
             if ~ischar(tline), break, end
-            %do the change here: 
+            %do the change here:
             if strcmpi(tline,' xgOrigin = 0.0,'),
                 fprintf(fido,'%s%i%s\n',' xgOrigin = ',xgOrigin,',');
@@ -369,207 +370,207 @@
 
 %Configure ISSM
-% {{{ CreateMesh: 
+% {{{ CreateMesh:
 if perform(org,'CreateMesh'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'Bathymetry');
-	loaddata(org,'IceSheetGeometry');
-
-	%create model: 
-	md=model();
-	
-	%Grab lat,long from MITgcm: 
-	lat=lat(:);
-	long=long(:);
-
-	%project lat,long: 
-	[x,y]=ll2xy(lat,long,-1);
-
-	index=[];
-	%  C  D
-	%  A  B 
-	for j=1:Ny-1,
-		for i=1:Nx-1, 
-			A=(j-1)*Nx+i;
-			B=(j-1)*Nx+i+1;
-			C=j*Nx+i;
-			D=j*Nx+i+1;
-			index(end+1,:)=[A B C];
-			index(end+1,:)=[C B D];
-		end
-	end
-
-	%fill mesh and model: 
-	md=meshconvert(md,index,x,y);
-	md.mesh.lat=lat;
-	md.mesh.long=long;
-
-	savemodel(org,md);
-
-end
-% }}}
-% {{{ MeshGeometry: 
+
+    loaddata(org,'Parameters');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+
+    %create model:
+    md=model();
+
+    %Grab lat,long from MITgcm:
+    lat=lat(:);
+    long=long(:);
+
+    %project lat,long:
+    [x,y]=ll2xy(lat,long,-1);
+
+    index=[];
+    %  C  D
+    %  A  B
+    for j=1:Ny-1,
+        for i=1:Nx-1,
+            A=(j-1)*Nx+i;
+            B=(j-1)*Nx+i+1;
+            C=j*Nx+i;
+            D=j*Nx+i+1;
+            index(end+1,:)=[A B C];
+            index(end+1,:)=[C B D];
+        end
+    end
+
+    %fill mesh and model:
+    md=meshconvert(md,index,x,y);
+    md.mesh.lat=lat;
+    md.mesh.long=long;
+
+    savemodel(org,md);
+
+end
+% }}}
+% {{{ MeshGeometry:
 if perform(org,'MeshGeometry'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'CreateMesh');
-	loaddata(org,'Bathymetry');
-	loaddata(org,'IceSheetGeometry');
-
-	%transfer to vertices: 
-	bathymetry=bathymetry(:);
-	iceshelf_mask=iceshelf_mask(:);
-	ice_mask=ice_mask(:);
-	thickness=thickness(:);
-	draft=draft(:);
-
-	%start filling some of the fields 
-	md.geometry.bed=bathymetry;
-	md.geometry.thickness=thickness;
-	md.geometry.base=md.geometry.bed; 
-	pos=find(iceshelf_mask); md.geometry.base(pos)=draft(pos);
-	md.geometry.surface=md.geometry.base+md.geometry.thickness;
-
-	%nothing passes icefront: 
-	pos=find(~ice_mask);
-	md.geometry.thickness(pos)=1;
-	md.geometry.surface(pos)=(1-di)*md.geometry.thickness(pos);
-	md.geometry.base(pos)=-di*md.geometry.thickness(pos);
-
-	%level sets: 
-	md.mask.groundedice_levelset=-ones(md.mesh.numberofvertices,1);
-	md.mask.ice_levelset=ones(md.mesh.numberofvertices,1);
-
-	pos=find(ice_mask); md.mask.ice_levelset(pos)=-1; 
-	pos=find(~iceshelf_mask & ice_mask); md.mask.groundedice_levelset(pos)=1;
-
-	%identify edges of grounded ice: 
-	groundedice_levelset=md.mask.groundedice_levelset;
-	for i=1:md.mesh.numberofelements,
-		m=groundedice_levelset(md.mesh.elements(i,:));
-		if abs(sum(m))~=3,
-			pos=find(m==1); md.mask.groundedice_levelset(md.mesh.elements(i,pos))=0;
-		end
-	end
-
-	%identify edges of ice: 
-	ice_levelset=md.mask.ice_levelset;
-	for i=1:md.mesh.numberofelements,
-		m=ice_levelset(md.mesh.elements(i,:));
-		if abs(sum(m))~=3,
-			pos=find(m==-1); md.mask.ice_levelset(md.mesh.elements(i,pos))=0;
-		end
-	end
-
-	savemodel(org,md);
-end
-% }}}
-% {{{ ParameterizeIce: 
+
+    loaddata(org,'Parameters');
+    loaddata(org,'CreateMesh');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+
+    %transfer to vertices:
+    bathymetry=bathymetry(:);
+    iceshelf_mask=iceshelf_mask(:);
+    ice_mask=ice_mask(:);
+    thickness=thickness(:);
+    draft=draft(:);
+
+    %start filling some of the fields
+    md.geometry.bed=bathymetry;
+    md.geometry.thickness=thickness;
+    md.geometry.base=md.geometry.bed;
+    pos=find(iceshelf_mask); md.geometry.base(pos)=draft(pos);
+    md.geometry.surface=md.geometry.base+md.geometry.thickness;
+
+    %nothing passes icefront:
+    pos=find(~ice_mask);
+    md.geometry.thickness(pos)=1;
+    md.geometry.surface(pos)=(1-di)*md.geometry.thickness(pos);
+    md.geometry.base(pos)=-di*md.geometry.thickness(pos);
+
+    %level sets:
+    md.mask.groundedice_levelset=-ones(md.mesh.numberofvertices,1);
+    md.mask.ice_levelset=ones(md.mesh.numberofvertices,1);
+
+    pos=find(ice_mask); md.mask.ice_levelset(pos)=-1;
+    pos=find(~iceshelf_mask & ice_mask); md.mask.groundedice_levelset(pos)=1;
+
+    %identify edges of grounded ice:
+    groundedice_levelset=md.mask.groundedice_levelset;
+    for i=1:md.mesh.numberofelements,
+        m=groundedice_levelset(md.mesh.elements(i,:));
+        if abs(sum(m))~=3,
+            pos=find(m==1); md.mask.groundedice_levelset(md.mesh.elements(i,pos))=0;
+        end
+    end
+
+    %identify edges of ice:
+    ice_levelset=md.mask.ice_levelset;
+    for i=1:md.mesh.numberofelements,
+        m=ice_levelset(md.mesh.elements(i,:));
+        if abs(sum(m))~=3,
+            pos=find(m==-1); md.mask.ice_levelset(md.mesh.elements(i,pos))=0;
+        end
+    end
+
+    savemodel(org,md);
+end
+% }}}
+% {{{ ParameterizeIce:
 if perform(org,'ParameterizeIce'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'CreateMesh');
-	loaddata(org,'MeshGeometry');
-
-	%miscellaneous
-	md.miscellaneous.name='test4001';
-
-	%initial velocity: 
-	md.initialization.vx=zeros(md.mesh.numberofvertices,1);
-	md.initialization.vy=zeros(md.mesh.numberofvertices,1);
-	md.initialization.vz=zeros(md.mesh.numberofvertices,1);
-
-	%friction: 
-	md.friction.coefficient=30*ones(md.mesh.numberofvertices,1);
-	pos=find(md.mask.groundedice_levelset<=0);
-	md.friction.coefficient(pos)=0;
-	md.friction.p=ones(md.mesh.numberofelements,1);
-	md.friction.q=ones(md.mesh.numberofelements,1);
-
-	%temperatures and surface mass balance:
-	md.initialization.temperature=(273.15-20)*ones(md.mesh.numberofvertices,1);
-	md.initialization.pressure=md.materials.rho_ice*md.constants.g*(md.geometry.surface-md.geometry.base);
-	md.smb.mass_balance = [1*ones(md.mesh.numberofvertices,1); 1];
-
-	%Flow law 
-	md.materials.rheology_B=paterson(md.initialization.temperature);
-	md.materials.rheology_n=3*ones(md.mesh.numberofelements,1);
-	md.damage.D=zeros(md.mesh.numberofvertices,1);
-	md.damage.spcdamage=NaN*ones(md.mesh.numberofvertices,1);
-	
-	%the spcs going
-	md.stressbalance.spcvx=NaN*ones(md.mesh.numberofvertices,1);
-	md.stressbalance.spcvy=NaN*ones(md.mesh.numberofvertices,1);
-	md.stressbalance.spcvz=NaN*ones(md.mesh.numberofvertices,1);
-	md.stressbalance.referential=NaN*ones(md.mesh.numberofvertices,6);
-	md.stressbalance.loadingforce=0*ones(md.mesh.numberofvertices,3);
-	md.masstransport.spcthickness=NaN*ones(md.mesh.numberofvertices,1); 
-
-	%deal with water: 
-	pos=find(md.mask.ice_levelset>0); 
-	md.stressbalance.spcvx(pos)=0;
-	md.stressbalance.spcvy(pos)=0;
-	md.stressbalance.spcvz(pos)=0;
-	md.masstransport.spcthickness(pos)=0;
-
-	%get some flux at the ice divide: 
-	pos=find(md.mesh.lat==min(md.mesh.lat));
-	md.stressbalance.spcvy(pos)=200;
-
-	%deal with boundaries, excluding icefront: 
-	vertex_on_boundary=zeros(md.mesh.numberofvertices,1);
-	vertex_on_boundary(md.mesh.segments(:,1:2))=1;
-	pos=find(vertex_on_boundary & md.mask.groundedice_levelset<=0);
-	md.stressbalance.spcvx(pos)=md.initialization.vx(pos);
-	md.stressbalance.spcvy(pos)=md.initialization.vy(pos);
-	md.stressbalance.spcvz(pos)=md.initialization.vz(pos);
-	md.masstransport.spcthickness(pos)=md.geometry.thickness(pos);
-
-	md.basalforcings.groundedice_melting_rate=zeros(md.mesh.numberofvertices,1);
-	md.basalforcings.floatingice_melting_rate=zeros(md.mesh.numberofvertices,1);
-	md.thermal.spctemperature=[md.initialization.temperature; 1]; %impose observed temperature on surface
-	md.basalforcings.geothermalflux=.064*ones(md.mesh.numberofvertices,1);
-
-	%flow equations: 
-	md=setflowequation(md,'SSA','all');
-
-	savemodel(org,md);
-end
-% }}}
-% {{{ RunUncoupledISSM: 
+
+    loaddata(org,'Parameters');
+    loaddata(org,'CreateMesh');
+    loaddata(org,'MeshGeometry');
+
+    %miscellaneous
+    md.miscellaneous.name='test4001';
+
+    %initial velocity:
+    md.initialization.vx=zeros(md.mesh.numberofvertices,1);
+    md.initialization.vy=zeros(md.mesh.numberofvertices,1);
+    md.initialization.vz=zeros(md.mesh.numberofvertices,1);
+
+    %friction:
+    md.friction.coefficient=30*ones(md.mesh.numberofvertices,1);
+    pos=find(md.mask.groundedice_levelset<=0);
+    md.friction.coefficient(pos)=0;
+    md.friction.p=ones(md.mesh.numberofelements,1);
+    md.friction.q=ones(md.mesh.numberofelements,1);
+
+    %temperatures and surface mass balance:
+    md.initialization.temperature=(273.15-20)*ones(md.mesh.numberofvertices,1);
+    md.initialization.pressure=md.materials.rho_ice*md.constants.g*(md.geometry.surface-md.geometry.base);
+    md.smb.mass_balance = [1*ones(md.mesh.numberofvertices,1); 1];
+
+    %Flow law
+    md.materials.rheology_B=paterson(md.initialization.temperature);
+    md.materials.rheology_n=3*ones(md.mesh.numberofelements,1);
+    md.damage.D=zeros(md.mesh.numberofvertices,1);
+    md.damage.spcdamage=NaN*ones(md.mesh.numberofvertices,1);
+
+    %the spcs going
+    md.stressbalance.spcvx=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.spcvy=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.spcvz=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.referential=NaN*ones(md.mesh.numberofvertices,6);
+    md.stressbalance.loadingforce=0*ones(md.mesh.numberofvertices,3);
+    md.masstransport.spcthickness=NaN*ones(md.mesh.numberofvertices,1);
+
+    %deal with water:
+    pos=find(md.mask.ice_levelset>0);
+    md.stressbalance.spcvx(pos)=0;
+    md.stressbalance.spcvy(pos)=0;
+    md.stressbalance.spcvz(pos)=0;
+    md.masstransport.spcthickness(pos)=0;
+
+    %get some flux at the ice divide:
+    pos=find(md.mesh.lat==min(md.mesh.lat));
+    md.stressbalance.spcvy(pos)=200;
+
+    %deal with boundaries, excluding icefront:
+    vertex_on_boundary=zeros(md.mesh.numberofvertices,1);
+    vertex_on_boundary(md.mesh.segments(:,1:2))=1;
+    pos=find(vertex_on_boundary & md.mask.groundedice_levelset<=0);
+    md.stressbalance.spcvx(pos)=md.initialization.vx(pos);
+    md.stressbalance.spcvy(pos)=md.initialization.vy(pos);
+    md.stressbalance.spcvz(pos)=md.initialization.vz(pos);
+    md.masstransport.spcthickness(pos)=md.geometry.thickness(pos);
+
+    md.basalforcings.groundedice_melting_rate=zeros(md.mesh.numberofvertices,1);
+    md.basalforcings.floatingice_melting_rate=zeros(md.mesh.numberofvertices,1);
+    md.thermal.spctemperature=[md.initialization.temperature; 1]; %impose observed temperature on surface
+    md.basalforcings.geothermalflux=.064*ones(md.mesh.numberofvertices,1);
+
+    %flow equations:
+    md=setflowequation(md,'SSA','all');
+
+    savemodel(org,md);
+end
+% }}}
+% {{{ RunUncoupledISSM:
 if perform(org,'RunUncoupledISSM'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'ParameterizeIce');
-
-	%timestepping: 
-	md.timestepping.final_time=final_time;
-	md.timestepping.time_step=time_step;
-	md.transient.isgroundingline=1;
-	md.transient.isthermal=0;
-	md.groundingline.migration='SubelementMigration';
-	md.groundingline.melt_interpolation='SubelementMelt2';
-	md.groundingline.friction_interpolation='SubelementFriction2';
-
-	md.cluster=generic('name',oshostname(),'np',2);
-	md=solve(md,'Transient');
-
-	savemodel(org,md);
+
+    loaddata(org,'Parameters');
+    loaddata(org,'ParameterizeIce');
+
+    %timestepping:
+    md.timestepping.final_time=final_time;
+    md.timestepping.time_step=time_step;
+    md.transient.isgroundingline=1;
+    md.transient.isthermal=0;
+    md.groundingline.migration='SubelementMigration';
+    md.groundingline.melt_interpolation='SubelementMelt2';
+    md.groundingline.friction_interpolation='SubelementFriction2';
+
+    md.cluster=generic('name',oshostname(),'np',2);
+    md=solve(md,'Transient');
+
+    savemodel(org,md);
 end
 % }}}
 
 %Run MITgcm/ISSM
-% {{{ RunCoupledMITgcmISSM: 
+% {{{ RunCoupledMITgcmISSM:
 if perform(org,'RunCoupledMITgcmISSM'),
 
-	%load data: 
-	loaddata(org,'Parameters');
-	loaddata(org,'ParameterizeIce');
-	loaddata(org,'Bathymetry');
-	loaddata(org,'IceSheetGeometry');
+    %load data:
+    loaddata(org,'Parameters');
+    loaddata(org,'ParameterizeIce');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
         endtime = round(MITgcmDeltaT * ...
          floor(time_step*y2s*async_step_MITgcm_multiplier/MITgcmDeltaT));
 
-        % {{{ prepare MITgcm 
+        % {{{ prepare MITgcm
         % rename previous run directory and create new one
         if exist ('run.old')
@@ -584,5 +585,5 @@
         !\cp ../MITgcm/input/eedata_uncoupled run/eedata
 
-        %load data: 
+        %load data:
         loaddata(org,'Parameters');
 
@@ -633,5 +634,5 @@
             tline = fgetl(fidi);
             if ~ischar(tline), break, end
-            %do the change here: 
+            %do the change here:
             if strcmpi(tline,' OB_Iwest = 40*1,'),
                 fprintf(fido,'%s%i%s\n',' OB_Iwest = ',Ny,'*1,');
@@ -651,31 +652,31 @@
         writebin('run/bathymetry.bin',bathymetry);
         % }}}
-            
-	% {{{ ISSM settings:
-
-	setenv('DYLD_LIBRARY_PATH', '/usr/local/gfortran/lib') 
-	%timestepping: 
-	md.timestepping.start_time=start_time;
-	md.timestepping.final_time=final_time;
-	md.timestepping.time_step=time_step;
-	md.cluster=generic('name',oshostname(),'np',2);
-	md.results.TransientSolution.Base=md.geometry.base;
-	md.transient.isgroundingline=1;
-	md.transient.isthermal=0;
-	md.groundingline.migration='SubelementMigration';
-	md.groundingline.melt_interpolation='SubelementMelt2';
-	md.groundingline.friction_interpolation='SubelementFriction2';
-
-	% }}}
-
-	%start looping:
-	results=md.results;
-
-	for t=start_time:time_step:final_time
+
+    % {{{ ISSM settings:
+
+    setenv('DYLD_LIBRARY_PATH', '/usr/local/gfortran/lib')
+    %timestepping:
+    md.timestepping.start_time=start_time;
+    md.timestepping.final_time=final_time;
+    md.timestepping.time_step=time_step;
+    md.cluster=generic('name',oshostname(),'np',2);
+    md.results.TransientSolution.Base=md.geometry.base;
+    md.transient.isgroundingline=1;
+    md.transient.isthermal=0;
+    md.groundingline.migration='SubelementMigration';
+    md.groundingline.melt_interpolation='SubelementMelt2';
+    md.groundingline.friction_interpolation='SubelementFriction2';
+
+    % }}}
+
+    %start looping:
+    results=md.results;
+
+    for t=start_time:time_step:final_time
             disp(['Year: ' num2str(t)])
 
-	    %send draft from ISSM to MITgcm:
-	    draft=md.results.TransientSolution(end).Base;
-	    pos=find(md.mask.ice_levelset>0); draft(pos)=0;
+        %send draft from ISSM to MITgcm:
+        draft=md.results.TransientSolution(end).Base;
+        pos=find(md.mask.ice_levelset>0); draft(pos)=0;
             if t>start_time
                 old_draft=readbin('run/icetopo.bin',[Nx,Ny]);
@@ -683,49 +684,49 @@
             writebin('run/icetopo.bin',draft);
 
-	    % {{{ generate MITgcm parameter file data 
-	    fidi=fopen('../MITgcm/input/data','r');
-	    fido=fopen('run/data','w');
-	    tline = fgetl(fidi);
-	    fprintf(fido,'%s\n',tline);
+        % {{{ generate MITgcm parameter file data
+        fidi=fopen('../MITgcm/input/data','r');
+        fido=fopen('run/data','w');
+        tline = fgetl(fidi);
+        fprintf(fido,'%s\n',tline);
             while 1
                 tline = fgetl(fidi);
                 if ~ischar(tline), break, end
-		%do the change here: 
-		if strcmpi(tline,' xgOrigin = 0.0,'),
-		    fprintf(fido,'%s%i%s\n',' xgOrigin = ',xgOrigin,',');
-		    continue;
-		end
-		if strcmpi(tline,' ygOrigin = -80.0,'),
-		    fprintf(fido,'%s%i%s\n',' ygOrigin = ',ygOrigin,',');
-		    continue;
-		end
-		if strcmpi(tline,' delX = 20*0.25,'),
-		    fprintf(fido,'%s%i*%g%s\n',' delX = ',Nx,dLong,',');
-		    continue;
-		end
-		if strcmpi(tline,' delY = 20*0.25,'),
-		    fprintf(fido,'%s%i*%g%s\n',' delY = ',Ny,dLat,',');
-		    continue;
-		end
+        %do the change here:
+        if strcmpi(tline,' xgOrigin = 0.0,'),
+            fprintf(fido,'%s%i%s\n',' xgOrigin = ',xgOrigin,',');
+            continue;
+        end
+        if strcmpi(tline,' ygOrigin = -80.0,'),
+            fprintf(fido,'%s%i%s\n',' ygOrigin = ',ygOrigin,',');
+            continue;
+        end
+        if strcmpi(tline,' delX = 20*0.25,'),
+            fprintf(fido,'%s%i*%g%s\n',' delX = ',Nx,dLong,',');
+            continue;
+        end
+        if strcmpi(tline,' delY = 20*0.25,'),
+            fprintf(fido,'%s%i*%g%s\n',' delY = ',Ny,dLat,',');
+            continue;
+        end
                 if strcmpi(tline,' delZ = 30*30.0,'),
                     fprintf(fido,'%s%i*%g%s\n',' delZ = ',Nz,delZ,',');
                     continue;
                 end
-		if strcmpi(tline,' endTime=2592000.,'),
-		    fprintf(fido,'%s%i%s\n',' endTime= ',endtime,',');
-		    continue;
-		end
-		if strcmpi(tline,' deltaT=1200.0,'),
-		    fprintf(fido,'%s%i%s\n',' deltaT= ',MITgcmDeltaT,',');
-		    continue;
-		end
-		if strcmpi(tline,' pChkptFreq=2592000.,'),
-		    fprintf(fido,'%s%i%s\n',' pChkptFreq= ',endtime,',');
-		    continue;
-		end
-		if strcmpi(tline,' taveFreq=2592000.,'),
-		    fprintf(fido,'%s%i%s\n',' taveFreq= ',endtime,',');
-		    continue;
-		end
+        if strcmpi(tline,' endTime=2592000.,'),
+            fprintf(fido,'%s%i%s\n',' endTime= ',endtime,',');
+            continue;
+        end
+        if strcmpi(tline,' deltaT=1200.0,'),
+            fprintf(fido,'%s%i%s\n',' deltaT= ',MITgcmDeltaT,',');
+            continue;
+        end
+        if strcmpi(tline,' pChkptFreq=2592000.,'),
+            fprintf(fido,'%s%i%s\n',' pChkptFreq= ',endtime,',');
+            continue;
+        end
+        if strcmpi(tline,' taveFreq=2592000.,'),
+            fprintf(fido,'%s%i%s\n',' taveFreq= ',endtime,',');
+            continue;
+        end
                 if strcmpi(tline,' rhoConst=1030.,'),
                     fprintf(fido,'%s%i%s\n',' rhoConst= ',rho_water,',');
@@ -736,12 +737,12 @@
                     continue;
                 end
-		fprintf(fido,'%s\n',tline);
-	    end
-	    %close  files
-	    fclose(fidi);
-	    fclose(fido);
-	    % }}}
-
-	    % {{{ generate initial MITgcm conditions
+        fprintf(fido,'%s\n',tline);
+        end
+        %close  files
+        fclose(fidi);
+        fclose(fido);
+        % }}}
+
+        % {{{ generate initial MITgcm conditions
             ds=round(endtime/MITgcmDeltaT);
             if t>start_time
@@ -762,5 +763,5 @@
                 tmp(find(tmp<0))=0;
                 [im jm]=find(tmp); % horizontal indices where there is melt
-                
+
                 % Extrapolate T/S to locations where ice shelf retreated
                 for i=1:length(im)
@@ -796,5 +797,5 @@
             % }}}
 
-            % {{{ system call to run MITgcm 
+            % {{{ system call to run MITgcm
             cd run
             eval(['!mpirun -np ' int2str(nPx*nPy) ' ./mitgcmuv']);
@@ -812,41 +813,41 @@
             % }}}
 
-	    %get melting rates from MITgcm
-	    %upward fresh water flux (kg/m^2/s):
-	    fnm=['run/SHICE_fwFluxtave_' myint2str(ts,10) '.data'];
-	    melting_rate=readbin(fnm,[Nx Ny]);
-
-	    %send averaged melting rate to ISSM
-	    %downward fresh water flux (m/y):
-	    melting_rate=-melting_rate(:)*y2s/rho_ice;
-		 md.basalforcings.floatingice_melting_rate=melting_rate;
-
-	    % {{{ run ISSM and recover results 
-
-	    md.timestepping.start_time=t;
-	    md.timestepping.final_time=t+time_step;;
-		md=solve(md,'Transient');
-
-		base=md.results.TransientSolution(end).Base;
-		thickness=md.results.TransientSolution(end).Thickness;
-		md.geometry.base=base;
-		md.geometry.thickness=thickness;
-		md.geometry.surface=md.geometry.base+md.geometry.thickness;
-		md.initialization.vx=md.results.TransientSolution(end).Vx;
-		md.initialization.vy=md.results.TransientSolution(end).Vy;
-		md.initialization.vel=md.results.TransientSolution(end).Vel;
-		md.initialization.pressure=md.results.TransientSolution(end).Pressure;
-		md.mask.groundedice_levelset=md.results.TransientSolution(end).MaskGroundediceLevelset;
-		md.results.TransientSolution(end).FloatingiceMeltingRate=md.basalforcings.floatingice_melting_rate;
-		
-		%save these results in the model, otherwise, they'll be wiped out
-		results(end+1)=md.results;
-
-		% }}}
-
-	end
-
-	md.results=results;
-	savemodel(org,md);
+        %get melting rates from MITgcm
+        %upward fresh water flux (kg/m^2/s):
+        fnm=['run/SHICE_fwFluxtave_' myint2str(ts,10) '.data'];
+        melting_rate=readbin(fnm,[Nx Ny]);
+
+        %send averaged melting rate to ISSM
+        %downward fresh water flux (m/y):
+        melting_rate=-melting_rate(:)*y2s/rho_ice;
+         md.basalforcings.floatingice_melting_rate=melting_rate;
+
+        % {{{ run ISSM and recover results
+
+        md.timestepping.start_time=t;
+        md.timestepping.final_time=t+time_step;;
+        md=solve(md,'Transient');
+
+        base=md.results.TransientSolution(end).Base;
+        thickness=md.results.TransientSolution(end).Thickness;
+        md.geometry.base=base;
+        md.geometry.thickness=thickness;
+        md.geometry.surface=md.geometry.base+md.geometry.thickness;
+        md.initialization.vx=md.results.TransientSolution(end).Vx;
+        md.initialization.vy=md.results.TransientSolution(end).Vy;
+        md.initialization.vel=md.results.TransientSolution(end).Vel;
+        md.initialization.pressure=md.results.TransientSolution(end).Pressure;
+        md.mask.groundedice_levelset=md.results.TransientSolution(end).MaskGroundediceLevelset;
+        md.results.TransientSolution(end).FloatingiceMeltingRate=md.basalforcings.floatingice_melting_rate;
+
+        %save these results in the model, otherwise, they'll be wiped out
+        results(end+1)=md.results;
+
+        % }}}
+
+    end
+
+    md.results=results;
+    savemodel(org,md);
 end
 % }}}
@@ -862,32 +863,32 @@
 melting_rate_4=readbin(fnm,[Nx Ny]);
 field_names     ={'Base1','Melting1','Vx2','Vy2','Thickness2','Base2','MaskGroundediceLevelset2','FloatingiceMeltingRate2',...
-	'Melting2','Vx3','Vy3','Thickness3','Base3','MaskGroundediceLevelset3','FloatingiceMeltingRate3',...
-	'Melting3','Vx4','Vy4','Thickness4','Base4','MaskGroundediceLevelset4','FloatingiceMeltingRate4','Melting4'};
+    'Melting2','Vx3','Vy3','Thickness3','Base3','MaskGroundediceLevelset3','FloatingiceMeltingRate3',...
+    'Melting3','Vx4','Vy4','Thickness4','Base4','MaskGroundediceLevelset4','FloatingiceMeltingRate4','Melting4'};
 field_tolerances={2e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,...
-	1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13,...
-	1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13 };
+    1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13,...
+    1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13 };
 field_values={...
-	(md.results(1).TransientSolution(end).Base),...
-	(melting_rate_1(:)),...
-	(md.results(2).TransientSolution(end).Vx),...
-	(md.results(2).TransientSolution(end).Vy),...
-	(md.results(2).TransientSolution(end).Thickness),...
-	(md.results(2).TransientSolution(end).Base),...
-	(md.results(2).TransientSolution(end).MaskGroundediceLevelset),...
-	(md.results(2).TransientSolution(end).FloatingiceMeltingRate),...
-	(melting_rate_2(:)),...
-	(md.results(3).TransientSolution(end).Vx),...
-	(md.results(3).TransientSolution(end).Vy),...
-	(md.results(3).TransientSolution(end).Thickness),...
-	(md.results(3).TransientSolution(end).Base),...
-	(md.results(3).TransientSolution(end).MaskGroundediceLevelset),...
-	(md.results(3).TransientSolution(end).FloatingiceMeltingRate),...
-	(melting_rate_3(:)),...
-	(md.results(4).TransientSolution(end).Vx),...
-	(md.results(4).TransientSolution(end).Vy),...
-	(md.results(4).TransientSolution(end).Thickness),...
-	(md.results(4).TransientSolution(end).Base),...
-	(md.results(4).TransientSolution(end).MaskGroundediceLevelset),...
-	(md.results(4).TransientSolution(end).FloatingiceMeltingRate),...
-	(melting_rate_4(:)),...
-	};
+    (md.results(1).TransientSolution(end).Base),...
+    (melting_rate_1(:)),...
+    (md.results(2).TransientSolution(end).Vx),...
+    (md.results(2).TransientSolution(end).Vy),...
+    (md.results(2).TransientSolution(end).Thickness),...
+    (md.results(2).TransientSolution(end).Base),...
+    (md.results(2).TransientSolution(end).MaskGroundediceLevelset),...
+    (md.results(2).TransientSolution(end).FloatingiceMeltingRate),...
+    (melting_rate_2(:)),...
+    (md.results(3).TransientSolution(end).Vx),...
+    (md.results(3).TransientSolution(end).Vy),...
+    (md.results(3).TransientSolution(end).Thickness),...
+    (md.results(3).TransientSolution(end).Base),...
+    (md.results(3).TransientSolution(end).MaskGroundediceLevelset),...
+    (md.results(3).TransientSolution(end).FloatingiceMeltingRate),...
+    (melting_rate_3(:)),...
+    (md.results(4).TransientSolution(end).Vx),...
+    (md.results(4).TransientSolution(end).Vy),...
+    (md.results(4).TransientSolution(end).Thickness),...
+    (md.results(4).TransientSolution(end).Base),...
+    (md.results(4).TransientSolution(end).MaskGroundediceLevelset),...
+    (md.results(4).TransientSolution(end).FloatingiceMeltingRate),...
+    (melting_rate_4(:)),...
+    };
Index: /issm/trunk/test/NightlyRun/test4002.m
===================================================================
--- /issm/trunk/test/NightlyRun/test4002.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test4002.m	(revision 24686)
@@ -3,72 +3,73 @@
 %
 %Script control parameters
-steps=[1 2 3 4 5 6 7 8 9 10 11 12];
+steps=1:12;
 final_time=1/365;
 
 %To download and recompile MITgcm from scratch:
-!rm -rf $ISSM_DIR/test/MITgcm/install
-!rm -rf $ISSM_DIR/test/MITgcm/build/*
-!rm $ISSM_DIR/test/MITgcm/code/SIZE.h
+!rm -rf ${ISSM_DIR}/test/MITgcm/install
+!rm -rf ${ISSM_DIR}/test/MITgcm/build/*
+!rm -f ${ISSM_DIR}/test/MITgcm/code/SIZE.h
+!rm -rf Models
 
 %Organizer
-mkdir Models
-org=organizer('repository','Models/','prefix','IceOcean.','steps',steps);
+!mkdir Models
+org=organizer('repository','Models','prefix','IceOcean.','steps',steps);
 
 presentdirectory=pwd;
 
-% {{{ Parameters: 
+% {{{ Parameters:
 if perform(org,'Parameters'),
-	Nx=20; %number of longitude cells
-	Ny=40; %number of latitude cells
-	Nz=30; %number of MITgcm vertical cells
-	nPx=2; %number of MITgcm processes to use in x direction
-	nPy=4; %number of MITgcm processes to use in y direction
-	xgOrigin=0; %origin of longitude
-	ygOrigin=-80; %origin of latitude
-	dLong=.25; %longitude grid spacing
-	dLat=.05; %latitude grid spacing
-	delZ=30; %thickness of vertical levels
-	icefront_position_ratio=.75; 
-	ice_thickness=100;
-	rho_ice=917;
-	rho_water=1028.14;
-	di=rho_ice/rho_water;
-
-	% MITgcm initial and lateral boundary conditions
-	iniSalt  = 34.4; % initial salinity (PSU)
-	iniTheta = -1.9; % initial potential temperature (deg C)
-	obcSalt  = 34.4; % open boundary salinity (PSU)
-	obcTheta =  1.0; % open boundary potential temperature (deg C)
-	mlDepth  = 120.; % mixed layer depth (m)
-	mlSalt   = 33.4; % open boundary salinity (PSU)
-	mlTheta  = -1.9; % open boundary potential temperature (deg C)
-	obcUvel  = -0.1; % open boundary velocity (m/s)        
-
-	MITgcmDeltaT=600; % MITgcm time step in seconds
-	y2s=31536000; % year to seconds conversion, i.e., seconds per year
-
-	% start_time and time_step
-	start_time=0; % in decimal years
-	time_step=1/(365*24); % coupling interval in decimal years
-	async_step_MITgcm_multiplier=1; % used to reduce run time for MItgcm
-
-	% bedrock/bathymetry
-	hmax=1000;
-	trough_depth=200;
-	deltah=300;
-	sea_level=1095;
-
-	% issm settings:
-	numlayers=10;
-
-	savedata(org, Nx, Ny, nPx, nPy, Nz, dLong, dLat, delZ, xgOrigin, ...
-		ygOrigin, icefront_position_ratio, ice_thickness, rho_ice, ...
-		rho_water, di, hmax, trough_depth, deltah, sea_level, ...
-		iniSalt, iniTheta, obcSalt, obcTheta, mlDepth, mlSalt, ...
-		mlTheta, obcUvel, start_time, time_step, MITgcmDeltaT, y2s,...
-		numlayers,async_step_MITgcm_multiplier);
-end
-% }}}
-% {{{ Bathymetry: 
+    Nx=20; %number of longitude cells
+    Ny=40; %number of latitude cells
+    Nz=30; %number of MITgcm vertical cells
+    nPx=2; %number of MITgcm processes to use in x direction
+    nPy=4; %number of MITgcm processes to use in y direction
+    xgOrigin=0; %origin of longitude
+    ygOrigin=-80; %origin of latitude
+    dLong=.25; %longitude grid spacing
+    dLat=.05; %latitude grid spacing
+    delZ=30; %thickness of vertical levels
+    icefront_position_ratio=.75;
+    ice_thickness=100;
+    rho_ice=917;
+    rho_water=1028.14;
+    di=rho_ice/rho_water;
+
+    % MITgcm initial and lateral boundary conditions
+    iniSalt  = 34.4; % initial salinity (PSU)
+    iniTheta = -1.9; % initial potential temperature (deg C)
+    obcSalt  = 34.4; % open boundary salinity (PSU)
+    obcTheta =  1.0; % open boundary potential temperature (deg C)
+    mlDepth  = 120.; % mixed layer depth (m)
+    mlSalt   = 33.4; % open boundary salinity (PSU)
+    mlTheta  = -1.9; % open boundary potential temperature (deg C)
+    obcUvel  = -0.1; % open boundary velocity (m/s)
+
+    MITgcmDeltaT=600; % MITgcm time step in seconds
+    y2s=31536000; % year to seconds conversion, i.e., seconds per year
+
+    % start_time and time_step
+    start_time=0; % in decimal years
+    time_step=1/(365*24); % coupling interval in decimal years
+    async_step_MITgcm_multiplier=1; % used to reduce run time for MItgcm
+
+    % bedrock/bathymetry
+    hmax=1000;
+    trough_depth=200;
+    deltah=300;
+    sea_level=1095;
+
+    % issm settings:
+    numlayers=10;
+
+    savedata(org, Nx, Ny, nPx, nPy, Nz, dLong, dLat, delZ, xgOrigin, ...
+        ygOrigin, icefront_position_ratio, ice_thickness, rho_ice, ...
+        rho_water, di, hmax, trough_depth, deltah, sea_level, ...
+        iniSalt, iniTheta, obcSalt, obcTheta, mlDepth, mlSalt, ...
+        mlTheta, obcUvel, start_time, time_step, MITgcmDeltaT, y2s,...
+        numlayers,async_step_MITgcm_multiplier);
+end
+% }}}
+% {{{ Bathymetry:
 if perform(org,'Bathymetry'),
 
@@ -95,47 +96,47 @@
 end
 % }}}
-% {{{ IceSheetGeometry: 
+% {{{ IceSheetGeometry:
 if perform(org,'IceSheetGeometry'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'Bathymetry');
-	latmin=min(lat(:));
-	latmax=max(lat(:));
-
-	%put ice_thickness constant layer of ice over the bathymetry, unless it floats: 
-	s=size(bathymetry);
-	thickness=ice_thickness*ones(s);
-
-	%figure out ice shelf: 
-	pos=find(-di*thickness>bathymetry);
-	iceshelf_mask=zeros(s);
-	iceshelf_mask(pos)=1;
-
-	ice_mask=ones(s);
-	pos=find((lat-latmin)/(latmax-latmin)>(icefront_position_ratio));
-	ice_mask(pos)=0;
-	iceshelf_mask(pos)=0;
-
-	%compute draft of ice shelf: 
-	draft=bathymetry;
-	pos=find(iceshelf_mask);
-	draft(pos)=-di*thickness(pos);
-	pos=find(~ice_mask); 
-	draft(pos)=0;
-
-	savedata(org,ice_mask,iceshelf_mask,draft,thickness);
+
+    loaddata(org,'Parameters');
+    loaddata(org,'Bathymetry');
+    latmin=min(lat(:));
+    latmax=max(lat(:));
+
+    %put ice_thickness constant layer of ice over the bathymetry, unless it floats:
+    s=size(bathymetry);
+    thickness=ice_thickness*ones(s);
+
+    %figure out ice shelf:
+    pos=find(-di*thickness>bathymetry);
+    iceshelf_mask=zeros(s);
+    iceshelf_mask(pos)=1;
+
+    ice_mask=ones(s);
+    pos=find((lat-latmin)/(latmax-latmin)>(icefront_position_ratio));
+    ice_mask(pos)=0;
+    iceshelf_mask(pos)=0;
+
+    %compute draft of ice shelf:
+    draft=bathymetry;
+    pos=find(iceshelf_mask);
+    draft(pos)=-di*thickness(pos);
+    pos=find(~ice_mask);
+    draft(pos)=0;
+
+    savedata(org,ice_mask,iceshelf_mask,draft,thickness);
 end
 % }}}
 
 %Configure MITgcm
-% {{{ GetMITgcm: 
+% {{{ GetMITgcm:
 if perform(org,'GetMITgcm'),
   system([pwd '/../MITgcm/get_mitgcm.sh']);
 end
 % }}}
-% {{{ BuildMITgcm: 
+% {{{ BuildMITgcm:
 if perform(org,'BuildMITgcm'),
 
-    %load data: 
+    %load data:
     loaddata(org,'Parameters');
 
@@ -149,5 +150,5 @@
             tline = fgetl(fidi);
             if ~ischar(tline), break, end
-            %do the change here: 
+            %do the change here:
             if strcmpi(tline,'     &           sNx =  20,'),
                 fprintf(fido,'%s%i%s\n','     &           sNx =  ',round(Nx/nPx),',');
@@ -177,15 +178,15 @@
 % }}}
 addpath(recursivepath([pwd '/../MITgcm']));
-% {{{ RunUncoupledMITgcm: 
+% {{{ RunUncoupledMITgcm:
 if perform(org,'RunUncoupledMITgcm'),
 
-    %load data: 
+    %load data:
     loaddata(org,'Parameters');
     loaddata(org,'Bathymetry');
     loaddata(org,'IceSheetGeometry');
-	 endtime = round(MITgcmDeltaT * ...
-		 floor(time_step*y2s*async_step_MITgcm_multiplier/MITgcmDeltaT));
-
-    % {{{ prepare MITgcm 
+     endtime = round(MITgcmDeltaT * ...
+         floor(time_step*y2s*async_step_MITgcm_multiplier/MITgcmDeltaT));
+
+    % {{{ prepare MITgcm
     % rename previous run directory and create new one
     if exist ('run.old')
@@ -199,6 +200,6 @@
     !\cp ../MITgcm/input/* run
     !\cp ../MITgcm/input/eedata_uncoupled run/eedata
-    
-    %load data: 
+
+    %load data:
     loaddata(org,'Parameters');
 
@@ -206,14 +207,14 @@
     S=iniSalt*ones(Nx,Ny,Nz);
     writebin('run/Salt.bin',S);
-    
+
     % initial temperature
     T=iniTheta*ones(Nx,Ny,Nz);
     writebin('run/Theta.bin',T);
-    
+
     % initial velocity
     Z=zeros(Nx,Ny,Nz);
     writebin('run/Uvel.bin',Z);
     writebin('run/Vvel.bin',Z);
-    
+
     % initial sea surface height
     Z=zeros(Nx,Ny);
@@ -236,5 +237,5 @@
     U=obcUvel*ones(Ny,Nz);
     writebin('run/OBu.bin',U);
-    
+
     % zero boundary conditions
     Z=zeros(Ny,Nz);
@@ -249,5 +250,5 @@
         tline = fgetl(fidi);
         if ~ischar(tline), break, end
-        %do the change here: 
+        %do the change here:
         if strcmpi(tline,' OB_Iwest = 40*1,'),
             fprintf(fido,'%s%i%s\n',' OB_Iwest = ',Ny,'*1,');
@@ -269,8 +270,8 @@
     % }}}
 
-    %start looping:  
+    %start looping:
     for t=start_time:time_step:final_time,
         disp(['Year: ' num2str(t)])
-        % {{{ generate MITgcm parameter file data 
+        % {{{ generate MITgcm parameter file data
         fidi=fopen('../MITgcm/input/data','r');
         fido=fopen('run/data','w');
@@ -280,5 +281,5 @@
             tline = fgetl(fidi);
             if ~ischar(tline), break, end
-            %do the change here: 
+            %do the change here:
             if strcmpi(tline,' xgOrigin = 0.0,'),
                 fprintf(fido,'%s%i%s\n',' xgOrigin = ',xgOrigin,',');
@@ -370,207 +371,207 @@
 
 %Configure ISSM
-% {{{ CreateMesh: 
+% {{{ CreateMesh:
 if perform(org,'CreateMesh'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'Bathymetry');
-	loaddata(org,'IceSheetGeometry');
-
-	%create model: 
-	md=model();
-	
-	%Grab lat,long from MITgcm: 
-	lat=lat(:);
-	long=long(:);
-
-	%project lat,long: 
-	[x,y]=ll2xy(lat,long,-1);
-
-	index=[];
-	%  C  D
-	%  A  B 
-	for j=1:Ny-1,
-		for i=1:Nx-1, 
-			A=(j-1)*Nx+i;
-			B=(j-1)*Nx+i+1;
-			C=j*Nx+i;
-			D=j*Nx+i+1;
-			index(end+1,:)=[A B C];
-			index(end+1,:)=[C B D];
-		end
-	end
-
-	%fill mesh and model: 
-	md=meshconvert(md,index,x,y);
-	md.mesh.lat=lat;
-	md.mesh.long=long;
-
-	savemodel(org,md);
-
-end
-% }}}
-% {{{ MeshGeometry: 
+
+    loaddata(org,'Parameters');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+
+    %create model:
+    md=model();
+
+    %Grab lat,long from MITgcm:
+    lat=lat(:);
+    long=long(:);
+
+    %project lat,long:
+    [x,y]=ll2xy(lat,long,-1);
+
+    index=[];
+    %  C  D
+    %  A  B
+    for j=1:Ny-1,
+        for i=1:Nx-1,
+            A=(j-1)*Nx+i;
+            B=(j-1)*Nx+i+1;
+            C=j*Nx+i;
+            D=j*Nx+i+1;
+            index(end+1,:)=[A B C];
+            index(end+1,:)=[C B D];
+        end
+    end
+
+    %fill mesh and model:
+    md=meshconvert(md,index,x,y);
+    md.mesh.lat=lat;
+    md.mesh.long=long;
+
+    savemodel(org,md);
+
+end
+% }}}
+% {{{ MeshGeometry:
 if perform(org,'MeshGeometry'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'CreateMesh');
-	loaddata(org,'Bathymetry');
-	loaddata(org,'IceSheetGeometry');
-
-	%transfer to vertices: 
-	bathymetry=bathymetry(:);
-	iceshelf_mask=iceshelf_mask(:);
-	ice_mask=ice_mask(:);
-	thickness=thickness(:);
-	draft=draft(:);
-
-	%start filling some of the fields 
-	md.geometry.bed=bathymetry;
-	md.geometry.thickness=thickness;
-	md.geometry.base=md.geometry.bed; 
-	pos=find(iceshelf_mask); md.geometry.base(pos)=draft(pos);
-	md.geometry.surface=md.geometry.base+md.geometry.thickness;
-
-	%nothing passes icefront: 
-	pos=find(~ice_mask);
-	md.geometry.thickness(pos)=1;
-	md.geometry.surface(pos)=(1-di)*md.geometry.thickness(pos);
-	md.geometry.base(pos)=-di*md.geometry.thickness(pos);
-
-	%level sets: 
-	md.mask.groundedice_levelset=-ones(md.mesh.numberofvertices,1);
-	md.mask.ice_levelset=ones(md.mesh.numberofvertices,1);
-
-	pos=find(ice_mask); md.mask.ice_levelset(pos)=-1; 
-	pos=find(~iceshelf_mask & ice_mask); md.mask.groundedice_levelset(pos)=1;
-
-	%identify edges of grounded ice: 
-	groundedice_levelset=md.mask.groundedice_levelset;
-	for i=1:md.mesh.numberofelements,
-		m=groundedice_levelset(md.mesh.elements(i,:));
-		if abs(sum(m))~=3,
-			pos=find(m==1); md.mask.groundedice_levelset(md.mesh.elements(i,pos))=0;
-		end
-	end
-
-	%identify edges of ice: 
-	ice_levelset=md.mask.ice_levelset;
-	for i=1:md.mesh.numberofelements,
-		m=ice_levelset(md.mesh.elements(i,:));
-		if abs(sum(m))~=3,
-			pos=find(m==-1); md.mask.ice_levelset(md.mesh.elements(i,pos))=0;
-		end
-	end
-
-	savemodel(org,md);
-end
-% }}}
-% {{{ ParameterizeIce: 
+
+    loaddata(org,'Parameters');
+    loaddata(org,'CreateMesh');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+
+    %transfer to vertices:
+    bathymetry=bathymetry(:);
+    iceshelf_mask=iceshelf_mask(:);
+    ice_mask=ice_mask(:);
+    thickness=thickness(:);
+    draft=draft(:);
+
+    %start filling some of the fields
+    md.geometry.bed=bathymetry;
+    md.geometry.thickness=thickness;
+    md.geometry.base=md.geometry.bed;
+    pos=find(iceshelf_mask); md.geometry.base(pos)=draft(pos);
+    md.geometry.surface=md.geometry.base+md.geometry.thickness;
+
+    %nothing passes icefront:
+    pos=find(~ice_mask);
+    md.geometry.thickness(pos)=1;
+    md.geometry.surface(pos)=(1-di)*md.geometry.thickness(pos);
+    md.geometry.base(pos)=-di*md.geometry.thickness(pos);
+
+    %level sets:
+    md.mask.groundedice_levelset=-ones(md.mesh.numberofvertices,1);
+    md.mask.ice_levelset=ones(md.mesh.numberofvertices,1);
+
+    pos=find(ice_mask); md.mask.ice_levelset(pos)=-1;
+    pos=find(~iceshelf_mask & ice_mask); md.mask.groundedice_levelset(pos)=1;
+
+    %identify edges of grounded ice:
+    groundedice_levelset=md.mask.groundedice_levelset;
+    for i=1:md.mesh.numberofelements,
+        m=groundedice_levelset(md.mesh.elements(i,:));
+        if abs(sum(m))~=3,
+            pos=find(m==1); md.mask.groundedice_levelset(md.mesh.elements(i,pos))=0;
+        end
+    end
+
+    %identify edges of ice:
+    ice_levelset=md.mask.ice_levelset;
+    for i=1:md.mesh.numberofelements,
+        m=ice_levelset(md.mesh.elements(i,:));
+        if abs(sum(m))~=3,
+            pos=find(m==-1); md.mask.ice_levelset(md.mesh.elements(i,pos))=0;
+        end
+    end
+
+    savemodel(org,md);
+end
+% }}}
+% {{{ ParameterizeIce:
 if perform(org,'ParameterizeIce'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'CreateMesh');
-	loaddata(org,'MeshGeometry');
-
-	%miscellaneous
-	md.miscellaneous.name='test4002';
-
-	%initial velocity: 
-	md.initialization.vx=zeros(md.mesh.numberofvertices,1);
-	md.initialization.vy=zeros(md.mesh.numberofvertices,1);
-	md.initialization.vz=zeros(md.mesh.numberofvertices,1);
-
-	%friction: 
-	md.friction.coefficient=30*ones(md.mesh.numberofvertices,1);
-	pos=find(md.mask.groundedice_levelset<=0);
-	md.friction.coefficient(pos)=0;
-	md.friction.p=ones(md.mesh.numberofelements,1);
-	md.friction.q=ones(md.mesh.numberofelements,1);
-
-	%temperatures and surface mass balance:
-	md.initialization.temperature=(273.15-20)*ones(md.mesh.numberofvertices,1);
-	md.initialization.pressure=md.materials.rho_ice*md.constants.g*(md.geometry.surface-md.geometry.base);
-	md.smb.mass_balance = [1*ones(md.mesh.numberofvertices,1); 1];
-
-	%Flow law 
-	md.materials.rheology_B=paterson(md.initialization.temperature);
-	md.materials.rheology_n=3*ones(md.mesh.numberofelements,1);
-	md.damage.D=zeros(md.mesh.numberofvertices,1);
-	md.damage.spcdamage=NaN*ones(md.mesh.numberofvertices,1);
-	
-	%the spcs going
-	md.stressbalance.spcvx=NaN*ones(md.mesh.numberofvertices,1);
-	md.stressbalance.spcvy=NaN*ones(md.mesh.numberofvertices,1);
-	md.stressbalance.spcvz=NaN*ones(md.mesh.numberofvertices,1);
-	md.stressbalance.referential=NaN*ones(md.mesh.numberofvertices,6);
-	md.stressbalance.loadingforce=0*ones(md.mesh.numberofvertices,3);
-	md.masstransport.spcthickness=NaN*ones(md.mesh.numberofvertices,1); 
-
-	%deal with water: 
-	pos=find(md.mask.ice_levelset>0); 
-	md.stressbalance.spcvx(pos)=0;
-	md.stressbalance.spcvy(pos)=0;
-	md.stressbalance.spcvz(pos)=0;
-	md.masstransport.spcthickness(pos)=0;
-
-	%get some flux at the ice divide: 
-	pos=find(md.mesh.lat==min(md.mesh.lat));
-	md.stressbalance.spcvy(pos)=200;
-
-	%deal with boundaries, excluding icefront: 
-	vertex_on_boundary=zeros(md.mesh.numberofvertices,1);
-	vertex_on_boundary(md.mesh.segments(:,1:2))=1;
-	pos=find(vertex_on_boundary & md.mask.groundedice_levelset<=0);
-	md.stressbalance.spcvx(pos)=md.initialization.vx(pos);
-	md.stressbalance.spcvy(pos)=md.initialization.vy(pos);
-	md.stressbalance.spcvz(pos)=md.initialization.vz(pos);
-	md.masstransport.spcthickness(pos)=md.geometry.thickness(pos);
-
-	md.basalforcings.groundedice_melting_rate=zeros(md.mesh.numberofvertices,1);
-	md.basalforcings.floatingice_melting_rate=zeros(md.mesh.numberofvertices,1);
-	md.thermal.spctemperature=[md.initialization.temperature; 1]; %impose observed temperature on surface
-	md.basalforcings.geothermalflux=.064*ones(md.mesh.numberofvertices,1);
-
-	%flow equations: 
-	md=setflowequation(md,'SSA','all');
-
-	savemodel(org,md);
-end
-% }}}
-% {{{ RunUncoupledISSM: 
+
+    loaddata(org,'Parameters');
+    loaddata(org,'CreateMesh');
+    loaddata(org,'MeshGeometry');
+
+    %miscellaneous
+    md.miscellaneous.name='test4002';
+
+    %initial velocity:
+    md.initialization.vx=zeros(md.mesh.numberofvertices,1);
+    md.initialization.vy=zeros(md.mesh.numberofvertices,1);
+    md.initialization.vz=zeros(md.mesh.numberofvertices,1);
+
+    %friction:
+    md.friction.coefficient=30*ones(md.mesh.numberofvertices,1);
+    pos=find(md.mask.groundedice_levelset<=0);
+    md.friction.coefficient(pos)=0;
+    md.friction.p=ones(md.mesh.numberofelements,1);
+    md.friction.q=ones(md.mesh.numberofelements,1);
+
+    %temperatures and surface mass balance:
+    md.initialization.temperature=(273.15-20)*ones(md.mesh.numberofvertices,1);
+    md.initialization.pressure=md.materials.rho_ice*md.constants.g*(md.geometry.surface-md.geometry.base);
+    md.smb.mass_balance = [1*ones(md.mesh.numberofvertices,1); 1];
+
+    %Flow law
+    md.materials.rheology_B=paterson(md.initialization.temperature);
+    md.materials.rheology_n=3*ones(md.mesh.numberofelements,1);
+    md.damage.D=zeros(md.mesh.numberofvertices,1);
+    md.damage.spcdamage=NaN*ones(md.mesh.numberofvertices,1);
+
+    %the spcs going
+    md.stressbalance.spcvx=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.spcvy=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.spcvz=NaN*ones(md.mesh.numberofvertices,1);
+    md.stressbalance.referential=NaN*ones(md.mesh.numberofvertices,6);
+    md.stressbalance.loadingforce=0*ones(md.mesh.numberofvertices,3);
+    md.masstransport.spcthickness=NaN*ones(md.mesh.numberofvertices,1);
+
+    %deal with water:
+    pos=find(md.mask.ice_levelset>0);
+    md.stressbalance.spcvx(pos)=0;
+    md.stressbalance.spcvy(pos)=0;
+    md.stressbalance.spcvz(pos)=0;
+    md.masstransport.spcthickness(pos)=0;
+
+    %get some flux at the ice divide:
+    pos=find(md.mesh.lat==min(md.mesh.lat));
+    md.stressbalance.spcvy(pos)=200;
+
+    %deal with boundaries, excluding icefront:
+    vertex_on_boundary=zeros(md.mesh.numberofvertices,1);
+    vertex_on_boundary(md.mesh.segments(:,1:2))=1;
+    pos=find(vertex_on_boundary & md.mask.groundedice_levelset<=0);
+    md.stressbalance.spcvx(pos)=md.initialization.vx(pos);
+    md.stressbalance.spcvy(pos)=md.initialization.vy(pos);
+    md.stressbalance.spcvz(pos)=md.initialization.vz(pos);
+    md.masstransport.spcthickness(pos)=md.geometry.thickness(pos);
+
+    md.basalforcings.groundedice_melting_rate=zeros(md.mesh.numberofvertices,1);
+    md.basalforcings.floatingice_melting_rate=zeros(md.mesh.numberofvertices,1);
+    md.thermal.spctemperature=[md.initialization.temperature; 1]; %impose observed temperature on surface
+    md.basalforcings.geothermalflux=.064*ones(md.mesh.numberofvertices,1);
+
+    %flow equations:
+    md=setflowequation(md,'SSA','all');
+
+    savemodel(org,md);
+end
+% }}}
+% {{{ RunUncoupledISSM:
 if perform(org,'RunUncoupledISSM'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'ParameterizeIce');
-
-	%timestepping: 
-	md.timestepping.final_time=final_time;
-	md.timestepping.time_step=time_step;
-	md.transient.isgroundingline=1;
-	md.transient.isthermal=0;
-	md.groundingline.migration='SubelementMigration';
-	md.groundingline.melt_interpolation='SubelementMelt2';
-	md.groundingline.friction_interpolation='SubelementFriction2';
-
-	md.cluster=generic('name',oshostname(),'np',2);
-	md=solve(md,'Transient');
-
-	savemodel(org,md);
+
+    loaddata(org,'Parameters');
+    loaddata(org,'ParameterizeIce');
+
+    %timestepping:
+    md.timestepping.final_time=final_time;
+    md.timestepping.time_step=time_step;
+    md.transient.isgroundingline=1;
+    md.transient.isthermal=0;
+    md.groundingline.migration='SubelementMigration';
+    md.groundingline.melt_interpolation='SubelementMelt2';
+    md.groundingline.friction_interpolation='SubelementFriction2';
+
+    md.cluster=generic('name',oshostname(),'np',2);
+    md=solve(md,'Transient');
+
+    savemodel(org,md);
 end
 % }}}
 
 %Run MITgcm/ISSM
-% {{{ RunCoupledMITgcmISSM: 
+% {{{ RunCoupledMITgcmISSM:
 if perform(org,'RunCoupledMITgcmISSM'),
 
-	%load data: 
-	loaddata(org,'Parameters');
-	loaddata(org,'ParameterizeIce');
-	loaddata(org,'Bathymetry');
-	loaddata(org,'IceSheetGeometry');
+    %load data:
+    loaddata(org,'Parameters');
+    loaddata(org,'ParameterizeIce');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
         endtime = round(MITgcmDeltaT * ...
          floor(time_step*y2s*async_step_MITgcm_multiplier/MITgcmDeltaT));
 
-        % {{{ prepare MITgcm 
+        % {{{ prepare MITgcm
         % rename previous run directory and create new one
         if exist ('run.old')
@@ -631,5 +632,5 @@
             tline = fgetl(fidi);
             if ~ischar(tline), break, end
-            %do the change here: 
+            %do the change here:
             if strcmpi(tline,' OB_Iwest = 40*1,'),
                 fprintf(fido,'%s%i%s\n',' OB_Iwest = ',Ny,'*1,');
@@ -649,31 +650,31 @@
         writebin('run/bathymetry.bin',bathymetry);
         % }}}
-            
-	% {{{ ISSM settings:
-
-	setenv('DYLD_LIBRARY_PATH', '/usr/local/gfortran/lib') 
-	%timestepping: 
-	md.timestepping.start_time=start_time;
-	md.timestepping.final_time=final_time;
-	md.timestepping.time_step=time_step;
-	md.cluster=generic('name',oshostname(),'np',2);
-	md.results.TransientSolution.Base=md.geometry.base;
-	md.transient.isgroundingline=1;
-	md.transient.isthermal=0;
-	md.groundingline.migration='SubelementMigration';
-	md.groundingline.melt_interpolation='SubelementMelt2';
-	md.groundingline.friction_interpolation='SubelementFriction2';
-
-	% }}}
-
-	%start looping:
-	results=md.results;
-
-	for t=start_time:time_step:final_time
+
+    % {{{ ISSM settings:
+
+    setenv('DYLD_LIBRARY_PATH', '/usr/local/gfortran/lib')
+    %timestepping:
+    md.timestepping.start_time=start_time;
+    md.timestepping.final_time=final_time;
+    md.timestepping.time_step=time_step;
+    md.cluster=generic('name',oshostname(),'np',2);
+    md.results.TransientSolution.Base=md.geometry.base;
+    md.transient.isgroundingline=1;
+    md.transient.isthermal=0;
+    md.groundingline.migration='SubelementMigration';
+    md.groundingline.melt_interpolation='SubelementMelt2';
+    md.groundingline.friction_interpolation='SubelementFriction2';
+
+    % }}}
+
+    %start looping:
+    results=md.results;
+
+    for t=start_time:time_step:final_time
             disp(['Year: ' num2str(t)])
 
-	    %send draft from ISSM to MITgcm:
-	    draft=md.results.TransientSolution(end).Base;
-	    pos=find(md.mask.ice_levelset>0); draft(pos)=0;
+        %send draft from ISSM to MITgcm:
+        draft=md.results.TransientSolution(end).Base;
+        pos=find(md.mask.ice_levelset>0); draft(pos)=0;
             if t>start_time
                 old_draft=readbin('run/icetopo.bin',[Nx,Ny]);
@@ -681,49 +682,49 @@
             writebin('run/icetopo.bin',draft);
 
-	    % {{{ generate MITgcm parameter file data 
-	    fidi=fopen('../MITgcm/input/data','r');
-	    fido=fopen('run/data','w');
-	    tline = fgetl(fidi);
-	    fprintf(fido,'%s\n',tline);
+        % {{{ generate MITgcm parameter file data
+        fidi=fopen('../MITgcm/input/data','r');
+        fido=fopen('run/data','w');
+        tline = fgetl(fidi);
+        fprintf(fido,'%s\n',tline);
             while 1
                 tline = fgetl(fidi);
                 if ~ischar(tline), break, end
-		%do the change here: 
-		if strcmpi(tline,' xgOrigin = 0.0,'),
-		    fprintf(fido,'%s%i%s\n',' xgOrigin = ',xgOrigin,',');
-		    continue;
-		end
-		if strcmpi(tline,' ygOrigin = -80.0,'),
-		    fprintf(fido,'%s%i%s\n',' ygOrigin = ',ygOrigin,',');
-		    continue;
-		end
-		if strcmpi(tline,' delX = 20*0.25,'),
-		    fprintf(fido,'%s%i*%g%s\n',' delX = ',Nx,dLong,',');
-		    continue;
-		end
-		if strcmpi(tline,' delY = 20*0.25,'),
-		    fprintf(fido,'%s%i*%g%s\n',' delY = ',Ny,dLat,',');
-		    continue;
-		end
+        %do the change here:
+        if strcmpi(tline,' xgOrigin = 0.0,'),
+            fprintf(fido,'%s%i%s\n',' xgOrigin = ',xgOrigin,',');
+            continue;
+        end
+        if strcmpi(tline,' ygOrigin = -80.0,'),
+            fprintf(fido,'%s%i%s\n',' ygOrigin = ',ygOrigin,',');
+            continue;
+        end
+        if strcmpi(tline,' delX = 20*0.25,'),
+            fprintf(fido,'%s%i*%g%s\n',' delX = ',Nx,dLong,',');
+            continue;
+        end
+        if strcmpi(tline,' delY = 20*0.25,'),
+            fprintf(fido,'%s%i*%g%s\n',' delY = ',Ny,dLat,',');
+            continue;
+        end
                 if strcmpi(tline,' delZ = 30*30.0,'),
                     fprintf(fido,'%s%i*%g%s\n',' delZ = ',Nz,delZ,',');
                     continue;
                 end
-		if strcmpi(tline,' endTime=2592000.,'),
-		    fprintf(fido,'%s%i%s\n',' endTime= ',endtime,',');
-		    continue;
-		end
-		if strcmpi(tline,' deltaT=1200.0,'),
-		    fprintf(fido,'%s%i%s\n',' deltaT= ',MITgcmDeltaT,',');
-		    continue;
-		end
-		if strcmpi(tline,' pChkptFreq=2592000.,'),
-		    fprintf(fido,'%s%i%s\n',' pChkptFreq= ',endtime,',');
-		    continue;
-		end
-		if strcmpi(tline,' taveFreq=2592000.,'),
-		    fprintf(fido,'%s%i%s\n',' taveFreq= ',endtime,',');
-		    continue;
-		end
+        if strcmpi(tline,' endTime=2592000.,'),
+            fprintf(fido,'%s%i%s\n',' endTime= ',endtime,',');
+            continue;
+        end
+        if strcmpi(tline,' deltaT=1200.0,'),
+            fprintf(fido,'%s%i%s\n',' deltaT= ',MITgcmDeltaT,',');
+            continue;
+        end
+        if strcmpi(tline,' pChkptFreq=2592000.,'),
+            fprintf(fido,'%s%i%s\n',' pChkptFreq= ',endtime,',');
+            continue;
+        end
+        if strcmpi(tline,' taveFreq=2592000.,'),
+            fprintf(fido,'%s%i%s\n',' taveFreq= ',endtime,',');
+            continue;
+        end
                 if strcmpi(tline,' rhoConst=1030.,'),
                     fprintf(fido,'%s%i%s\n',' rhoConst= ',rho_water,',');
@@ -734,12 +735,12 @@
                     continue;
                 end
-		fprintf(fido,'%s\n',tline);
-	    end
-	    %close  files
-	    fclose(fidi);
-	    fclose(fido);
-	    % }}}
-
-	    % {{{ generate initial MITgcm conditions
+        fprintf(fido,'%s\n',tline);
+        end
+        %close  files
+        fclose(fidi);
+        fclose(fido);
+        % }}}
+
+        % {{{ generate initial MITgcm conditions
             ds=round(endtime/MITgcmDeltaT);
             if t>start_time
@@ -760,5 +761,5 @@
                 tmp(find(tmp<0))=0;
                 [im jm]=find(tmp); % horizontal indices where there is melt
-                
+
                 % Extrapolate T/S to locations where ice shelf retreated
                 for i=1:length(im)
@@ -794,5 +795,5 @@
             % }}}
 
-            % {{{ system call to run MITgcm 
+            % {{{ system call to run MITgcm
             cd run
             eval(['!mpiexec -np ' int2str(nPx*nPy) ' ./mitgcmuv']);
@@ -810,55 +811,55 @@
             % }}}
 
-	    %get melting rates from MITgcm
-	    %upward fresh water flux (kg/m^2/s):
-	    fnm=['run/SHICE_fwFluxtave_' myint2str(ts,10) '.data'];
-	    melting_rate=readbin(fnm,[Nx Ny]);
-
-	    %send averaged melting rate to ISSM
-	    %downward fresh water flux (m/y):
-	    melting_rate=-melting_rate(:)*y2s/rho_ice;
-		 md.basalforcings.floatingice_melting_rate=melting_rate;
-
-	    % {{{ run ISSM and recover results 
-
-	    md.timestepping.start_time=t;
-	    md.timestepping.final_time=t+time_step;;
-		md=solve(md,'Transient');
-
-		base=md.results.TransientSolution(end).Base;
-		thickness=md.results.TransientSolution(end).Thickness;
-		md.geometry.base=base;
-		md.geometry.thickness=thickness;
-		md.geometry.surface=md.geometry.base+md.geometry.thickness;
-		md.initialization.vx=md.results.TransientSolution(end).Vx;
-		md.initialization.vy=md.results.TransientSolution(end).Vy;
-		md.initialization.vel=md.results.TransientSolution(end).Vel;
-		md.initialization.pressure=md.results.TransientSolution(end).Pressure;
-		md.mask.groundedice_levelset=md.results.TransientSolution(end).MaskGroundediceLevelset;
-		md.results.TransientSolution(end).FloatingiceMeltingRate=md.basalforcings.floatingice_melting_rate;
-		
-		%save these results in the model, otherwise, they'll be wiped out
-		results(end+1)=md.results;
-
-		% }}}
-
-
-	end
-
-	md.results=results;
-	savemodel(org,md);
-end
-% }}}
-% {{{ RunCoupledMITgcmISSM2: 
+        %get melting rates from MITgcm
+        %upward fresh water flux (kg/m^2/s):
+        fnm=['run/SHICE_fwFluxtave_' myint2str(ts,10) '.data'];
+        melting_rate=readbin(fnm,[Nx Ny]);
+
+        %send averaged melting rate to ISSM
+        %downward fresh water flux (m/y):
+        melting_rate=-melting_rate(:)*y2s/rho_ice;
+         md.basalforcings.floatingice_melting_rate=melting_rate;
+
+        % {{{ run ISSM and recover results
+
+        md.timestepping.start_time=t;
+        md.timestepping.final_time=t+time_step;;
+        md=solve(md,'Transient');
+
+        base=md.results.TransientSolution(end).Base;
+        thickness=md.results.TransientSolution(end).Thickness;
+        md.geometry.base=base;
+        md.geometry.thickness=thickness;
+        md.geometry.surface=md.geometry.base+md.geometry.thickness;
+        md.initialization.vx=md.results.TransientSolution(end).Vx;
+        md.initialization.vy=md.results.TransientSolution(end).Vy;
+        md.initialization.vel=md.results.TransientSolution(end).Vel;
+        md.initialization.pressure=md.results.TransientSolution(end).Pressure;
+        md.mask.groundedice_levelset=md.results.TransientSolution(end).MaskGroundediceLevelset;
+        md.results.TransientSolution(end).FloatingiceMeltingRate=md.basalforcings.floatingice_melting_rate;
+
+        %save these results in the model, otherwise, they'll be wiped out
+        results(end+1)=md.results;
+
+        % }}}
+
+
+    end
+
+    md.results=results;
+    savemodel(org,md);
+end
+% }}}
+% {{{ RunCoupledMITgcmISSM2:
 if perform(org,'RunCoupledMITgcmISSM2'),
-	
-	loaddata(org,'Parameters');
-	loaddata(org,'ParameterizeIce');
-	loaddata(org,'Bathymetry');
-	loaddata(org,'IceSheetGeometry');
-		endtime = round(MITgcmDeltaT * floor(final_time*y2s/MITgcmDeltaT));
-		outputtime = round(MITgcmDeltaT * floor(time_step*y2s/MITgcmDeltaT));
-
-        % {{{ prepare MITgcm 
+
+    loaddata(org,'Parameters');
+    loaddata(org,'ParameterizeIce');
+    loaddata(org,'Bathymetry');
+    loaddata(org,'IceSheetGeometry');
+        endtime = round(MITgcmDeltaT * floor(final_time*y2s/MITgcmDeltaT));
+        outputtime = round(MITgcmDeltaT * floor(time_step*y2s/MITgcmDeltaT));
+
+        % {{{ prepare MITgcm
         % rename previous run directory and create new one
         if exist ('run.old')
@@ -918,5 +919,5 @@
             tline = fgetl(fidi);
             if ~ischar(tline), break, end
-            %do the change here: 
+            %do the change here:
             if strcmpi(tline,' OB_Iwest = 40*1,'),
                 fprintf(fido,'%s%i%s\n',' OB_Iwest = ',Ny,'*1,');
@@ -937,5 +938,5 @@
         writebin('run/icetopo.bin',draft);
         % }}}
-        % {{{ generate MITgcm parameter file data 
+        % {{{ generate MITgcm parameter file data
         fidi=fopen('../MITgcm/input/data','r');
         fido=fopen('run/data','w');
@@ -945,5 +946,5 @@
             tline = fgetl(fidi);
             if ~ischar(tline), break, end
-            %do the change here: 
+            %do the change here:
             if strcmpi(tline,' xgOrigin = 0.0,'),
                 fprintf(fido,'%s%i%s\n',' xgOrigin = ',xgOrigin,',');
@@ -996,22 +997,22 @@
         fclose(fido);
         % }}}
-	        
-	md.transient.isoceancoupling=1;
-	md.transient.isgroundingline=1;
-	md.groundingline.migration='None';
-	md.groundingline.melt_interpolation='SubelementMelt2';
-	md.groundingline.friction_interpolation='SubelementFriction2';
-	md.timestepping.coupling_time=time_step;
+
+    md.transient.isoceancoupling=1;
+    md.transient.isgroundingline=1;
+    md.groundingline.migration='None';
+    md.groundingline.melt_interpolation='SubelementMelt2';
+    md.groundingline.friction_interpolation='SubelementFriction2';
+    md.timestepping.coupling_time=time_step;
    md.timestepping.time_step=time_step;
-	md.timestepping.final_time=final_time-time_step;
-	md.cluster.npocean=nPx*nPy;
-	md.cluster.np=2;
-	md.cluster.executionpath=[pwd '/run'];
-	md.masstransport.requested_outputs={'default','BasalforcingsFloatingiceMeltingRate'};
-
-	md=solveiceocean(md,'Transient','runtimename',false);
-
-%	%eval(['!mpiexec -np ' int2str(md.cluster.np) ' ' md.cluster.codepath '/issm_ocean.exe TransientSolution ' pwd ' ' md.miscellaneous.name ' ']);
-%	eval(['!mpiexec -np ' int2str(md.cluster.np) ' ' md.cluster.codepath '/issm_ocean.exe TransientSolution ' pwd ' ' md.miscellaneous.name ' : -np ' int2str(nPx*nPy) ' ./mitgcmuv']);
+    md.timestepping.final_time=final_time-time_step;
+    md.cluster.npocean=nPx*nPy;
+    md.cluster.np=2;
+    md.cluster.executionpath=[pwd '/run'];
+    md.masstransport.requested_outputs={'default','BasalforcingsFloatingiceMeltingRate'};
+
+    md=solveiceocean(md,'Transient','runtimename',false);
+
+%   %eval(['!mpiexec -np ' int2str(md.cluster.np) ' ' md.cluster.codepath '/issm_ocean.exe TransientSolution ' pwd ' ' md.miscellaneous.name ' ']);
+%   eval(['!mpiexec -np ' int2str(md.cluster.np) ' ' md.cluster.codepath '/issm_ocean.exe TransientSolution ' pwd ' ' md.miscellaneous.name ' : -np ' int2str(nPx*nPy) ' ./mitgcmuv']);
 end
 % }}}
@@ -1027,32 +1028,32 @@
 melting_rate_4=readbin(fnm,[Nx Ny]);
 field_names     ={'Base1','Melting1','Vx2','Vy2','Thickness2','Base2','MaskGroundediceLevelset2','FloatingiceMeltingRate2',...
-	'Melting2','Vx3','Vy3','Thickness3','Base3','MaskGroundediceLevelset3','FloatingiceMeltingRate3',...
-	'Melting3','Vx4','Vy4','Thickness4','Base4','MaskGroundediceLevelset4','FloatingiceMeltingRate4','Melting4'};
+    'Melting2','Vx3','Vy3','Thickness3','Base3','MaskGroundediceLevelset3','FloatingiceMeltingRate3',...
+    'Melting3','Vx4','Vy4','Thickness4','Base4','MaskGroundediceLevelset4','FloatingiceMeltingRate4','Melting4'};
 field_tolerances={2e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,1e-13,...
-	1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13,...
-	1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13 };
+    1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13,...
+    1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13 };
 field_values={...
-	(md.results.TransientSolution(1).Base),...
-	(melting_rate_1(:)),...
-	(md.results.TransientSolution(2).Vx),...
-	(md.results.TransientSolution(2).Vy),...
-	(md.results.TransientSolution(2).Thickness),...
-	(md.results.TransientSolution(2).Base),...
-	(md.results.TransientSolution(2).MaskGroundediceLevelset),...
-	(md.results.TransientSolution(2).BasalforcingsFloatingiceMeltingRate),...
-	(melting_rate_2(:)),...
-	(md.results.TransientSolution(3).Vx),...
-	(md.results.TransientSolution(3).Vy),...
-	(md.results.TransientSolution(3).Thickness),...
-	(md.results.TransientSolution(3).Base),...
-	(md.results.TransientSolution(3).MaskGroundediceLevelset),...
-	(md.results.TransientSolution(3).BasalforcingsFloatingiceMeltingRate),...
-	(melting_rate_3(:)),...
-	(md.results.TransientSolution(4).Vx),...
-	(md.results.TransientSolution(4).Vy),...
-	(md.results.TransientSolution(4).Thickness),...
-	(md.results.TransientSolution(4).Base),...
-	(md.results.TransientSolution(4).MaskGroundediceLevelset),...
-	(md.results.TransientSolution(4).BasalforcingsFloatingiceMeltingRate),...
-	(melting_rate_4(:)),...
-	};
+    (md.results.TransientSolution(1).Base),...
+    (melting_rate_1(:)),...
+    (md.results.TransientSolution(2).Vx),...
+    (md.results.TransientSolution(2).Vy),...
+    (md.results.TransientSolution(2).Thickness),...
+    (md.results.TransientSolution(2).Base),...
+    (md.results.TransientSolution(2).MaskGroundediceLevelset),...
+    (md.results.TransientSolution(2).BasalforcingsFloatingiceMeltingRate),...
+    (melting_rate_2(:)),...
+    (md.results.TransientSolution(3).Vx),...
+    (md.results.TransientSolution(3).Vy),...
+    (md.results.TransientSolution(3).Thickness),...
+    (md.results.TransientSolution(3).Base),...
+    (md.results.TransientSolution(3).MaskGroundediceLevelset),...
+    (md.results.TransientSolution(3).BasalforcingsFloatingiceMeltingRate),...
+    (melting_rate_3(:)),...
+    (md.results.TransientSolution(4).Vx),...
+    (md.results.TransientSolution(4).Vy),...
+    (md.results.TransientSolution(4).Thickness),...
+    (md.results.TransientSolution(4).Base),...
+    (md.results.TransientSolution(4).MaskGroundediceLevelset),...
+    (md.results.TransientSolution(4).BasalforcingsFloatingiceMeltingRate),...
+    (melting_rate_4(:)),...
+    };
Index: /issm/trunk/test/NightlyRun/test4003.m
===================================================================
--- /issm/trunk/test/NightlyRun/test4003.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test4003.m	(revision 24686)
@@ -3,14 +3,15 @@
 %
 %Script control parameters
-steps=[1 2 3 4 5 6 7 8 9 10 11];
+steps=1:11;
 final_time=1/365;
 
 %To download and recompile MITgcm from scratch:
-!rm -rf $ISSM_DIR/test/MITgcm/install_dngoldberg
-!rm -rf $ISSM_DIR/test/MITgcm/build/*
+!rm -rf ${ISSM_DIR}/test/MITgcm/install_dngoldberg
+!rm -rf ${ISSM_DIR}/test/MITgcm/build/*
+!rm -rf Models
 
 %Organizer
-mkdir Models
-org=organizer('repository','Models/','prefix','IceOcean.','steps',steps);
+!mkdir Models
+org=organizer('repository','Models','prefix','IceOcean.','steps',steps);
 
 presentdirectory=pwd;
Index: /issm/trunk/test/NightlyRun/test404.m
===================================================================
--- /issm/trunk/test/NightlyRun/test404.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test404.m	(revision 24686)
@@ -10,5 +10,5 @@
 %Fields and tolerances to track changes
 field_names     ={'Vx','Vy','Vz','Vel','Pressure'};
-field_tolerances={2e-06,4e-06,2e-06,1e-06,8e-07};
+field_tolerances={4e-06,6e-06,2e-06,1e-06,8e-07};
 field_values={...
 	(md.results.StressbalanceSolution.Vx),...
Index: /issm/trunk/test/NightlyRun/test404.py
===================================================================
--- /issm/trunk/test/NightlyRun/test404.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test404.py	(revision 24686)
@@ -19,5 +19,5 @@
 #Fields and tolerances to track changes
 field_names = ['Vx', 'Vy', 'Vz', 'Vel', 'Pressure']
-field_tolerances = [2e-06, 4e-06, 2e-06, 1e-06, 8e-07]
+field_tolerances = [4e-06, 6e-06, 2e-06, 1e-06, 8e-07]
 field_values = [md.results.StressbalanceSolution.Vx,
                 md.results.StressbalanceSolution.Vy,
Index: /issm/trunk/test/NightlyRun/test421.m
===================================================================
--- /issm/trunk/test/NightlyRun/test421.m	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test421.m	(revision 24686)
@@ -10,5 +10,5 @@
 %Fields and tolerances to track changes
 field_names     ={'Vx','Vy','Vz','Vel','Pressure'};
-field_tolerances={2e-06,2e-06,2e-05,2e-06,2e-06};
+field_tolerances={2e-06,2e-06,2e-05,2e-06,3e-06};
 field_values={...
 	(md.results.StressbalanceSolution.Vx),...
Index: /issm/trunk/test/NightlyRun/test421.py
===================================================================
--- /issm/trunk/test/NightlyRun/test421.py	(revision 24685)
+++ /issm/trunk/test/NightlyRun/test421.py	(revision 24686)
@@ -19,5 +19,5 @@
 #Fields and tolerances to track changes
 field_names = ['Vx', 'Vy', 'Vz', 'Vel', 'Pressure']
-field_tolerances = [2e-06, 2e-06, 2e-05, 2e-06, 2e-06]
+field_tolerances = [2e-06, 2e-06, 2e-05, 2e-06, 3e-06]
 field_values = [md.results.StressbalanceSolution.Vx,
                 md.results.StressbalanceSolution.Vy,
Index: /issm/trunk/test/NightlyRun/test480.m
===================================================================
--- /issm/trunk/test/NightlyRun/test480.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test480.m	(revision 24686)
@@ -0,0 +1,40 @@
+%Test Name: TransientFrictionTsai
+md=triangle(model(),'../Exp/Square.exp',150000.);
+md=setmask(md,'../Exp/SquareShelf.exp','');
+md=parameterize(md,'../Par/SquareSheetShelf.par');
+md=extrude(md,4,1);
+md=setflowequation(md,'HO','all');
+md.transient.isthermal = 0;
+md.friction=frictiontsai(md.friction);
+md.friction.C = 20.e4*ones(md.mesh.numberofvertices,1);
+md.friction.f = 0.5*ones(md.mesh.numberofvertices,1);
+md.friction.m = 1./3.*ones(md.mesh.numberofelements,1);
+md.cluster=generic('name',oshostname(),'np',3);
+md=solve(md,'Transient');
+
+%Fields and tolerances to track changes
+field_names     ={'Vx1','Vy1','Vel1','Pressure1','Bed1','Surface1','Thickness1','Vx2','Vy2','Vel2','Pressure2','Bed2','Surface2','Thickness2','Vx3','Vy3','Vel3','Pressure3','Bed3','Surface3','Thickness3'};
+field_tolerances={2e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,2e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09};
+field_values={...
+	(md.results.TransientSolution(1).Vx),...
+	(md.results.TransientSolution(1).Vy),...
+	(md.results.TransientSolution(1).Vel),...
+	(md.results.TransientSolution(1).Pressure),...
+	(md.results.TransientSolution(1).Base),...
+	(md.results.TransientSolution(1).Surface),...
+	(md.results.TransientSolution(1).Thickness),...
+	(md.results.TransientSolution(2).Vx),...
+	(md.results.TransientSolution(2).Vy),...
+	(md.results.TransientSolution(2).Vel),...
+	(md.results.TransientSolution(2).Pressure),...
+	(md.results.TransientSolution(2).Base),...
+	(md.results.TransientSolution(2).Surface),...
+	(md.results.TransientSolution(2).Thickness),...
+	(md.results.TransientSolution(3).Vx),...
+	(md.results.TransientSolution(3).Vy),...
+	(md.results.TransientSolution(3).Vel),...
+	(md.results.TransientSolution(3).Pressure),...
+	(md.results.TransientSolution(3).Base),...
+	(md.results.TransientSolution(3).Surface),...
+	(md.results.TransientSolution(3).Thickness),...
+	};
Index: /issm/trunk/test/NightlyRun/test481.m
===================================================================
--- /issm/trunk/test/NightlyRun/test481.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test481.m	(revision 24686)
@@ -0,0 +1,40 @@
+%Test Name: TransientFrictionSchoof
+md=triangle(model(),'../Exp/Square.exp',150000.);
+md=setmask(md,'../Exp/SquareShelf.exp','');
+md=parameterize(md,'../Par/SquareSheetShelf.par');
+md=extrude(md,4,1);
+md=setflowequation(md,'HO','all');
+md.transient.isthermal = 0;
+md.friction=frictionschoof(md.friction);
+md.friction.C    = 20.e4*ones(md.mesh.numberofvertices,1);
+md.friction.Cmax = 0.5*ones(md.mesh.numberofvertices,1);
+md.friction.m    = 1./3.*ones(md.mesh.numberofelements,1);
+md.cluster=generic('name',oshostname(),'np',3);
+md=solve(md,'Transient');
+
+%Fields and tolerances to track changes
+field_names     ={'Vx1','Vy1','Vel1','Pressure1','Bed1','Surface1','Thickness1','Vx2','Vy2','Vel2','Pressure2','Bed2','Surface2','Thickness2','Vx3','Vy3','Vel3','Pressure3','Bed3','Surface3','Thickness3'};
+field_tolerances={2e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09,2e-09,1e-09,1e-09,1e-09,1e-09,1e-09,1e-09};
+field_values={...
+	(md.results.TransientSolution(1).Vx),...
+	(md.results.TransientSolution(1).Vy),...
+	(md.results.TransientSolution(1).Vel),...
+	(md.results.TransientSolution(1).Pressure),...
+	(md.results.TransientSolution(1).Base),...
+	(md.results.TransientSolution(1).Surface),...
+	(md.results.TransientSolution(1).Thickness),...
+	(md.results.TransientSolution(2).Vx),...
+	(md.results.TransientSolution(2).Vy),...
+	(md.results.TransientSolution(2).Vel),...
+	(md.results.TransientSolution(2).Pressure),...
+	(md.results.TransientSolution(2).Base),...
+	(md.results.TransientSolution(2).Surface),...
+	(md.results.TransientSolution(2).Thickness),...
+	(md.results.TransientSolution(3).Vx),...
+	(md.results.TransientSolution(3).Vy),...
+	(md.results.TransientSolution(3).Vel),...
+	(md.results.TransientSolution(3).Pressure),...
+	(md.results.TransientSolution(3).Base),...
+	(md.results.TransientSolution(3).Surface),...
+	(md.results.TransientSolution(3).Thickness),...
+	};
Index: /issm/trunk/test/NightlyRun/test900.m
===================================================================
--- /issm/trunk/test/NightlyRun/test900.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test900.m	(revision 24686)
@@ -0,0 +1,47 @@
+%Test Name:SquareNoDynUnConfinedHydroDC
+md=triangle(model(),'../Exp/Square.exp',100000.);
+md=setmask(md,'','');
+%reduced slab (20m long)
+md.mesh.x=md.mesh.x/5.0e4;
+md.mesh.y=md.mesh.y/5.0e4;
+md=parameterize(md,'../Par/SquareNoDyn.par');
+md.cluster=generic('name',oshostname(),'np',1);
+
+md.transient.ishydrology=1;
+md.hydrology=(hydrologydc);
+md.hydrology=initialize(md.hydrology,md);
+
+%Hydro Model Parameters
+md.hydrology.isefficientlayer=0;
+md.hydrology.sedimentlimit_flag=0;
+md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
+md.hydrology.rel_tol=1.0e-6;
+md.hydrology.penalty_lock=0;
+md.hydrology.max_iter=200;
+md.hydrology.transfer_flag=0;
+md.hydrology.unconfined_flag=1;
+%Sediment
+md.hydrology.sediment_porosity=0.1;
+md.hydrology.sediment_thickness=10.0;
+md.hydrology.sediment_transmitivity=(1.0e-3*md.hydrology.sediment_thickness)*ones(md.mesh.numberofvertices,1);
+%init
+md.initialization.sediment_head=-5.0*ones(md.mesh.numberofvertices,1);
+%BC
+md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
+pos=find(md.mesh.x==0);
+md.hydrology.spcsediment_head(pos)=0.5;
+
+md.timestepping.time_step=5/md.constants.yts; %5s stepping
+md.settings.output_frequency=2;
+md.timestepping.final_time=300/md.constants.yts; %300s run
+
+md=solve(md,'Transient');
+
+%fields to track, results can also be found in
+%Wang 2009 Fig 6b (journal of Hydrology)
+field_names={'SedimentWaterHead1',...
+	     'SedimentWaterHead2'};
+field_tolerances={1e-13,...
+		  1e-13};
+field_values={md.results.TransientSolution(11).SedimentHead,...
+	      md.results.TransientSolution(31).SedimentHead};
Index: /issm/trunk/test/NightlyRun/test900.py
===================================================================
--- /issm/trunk/test/NightlyRun/test900.py	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test900.py	(revision 24686)
@@ -0,0 +1,53 @@
+#Test Name:SquareNoDynUnConfinedHydroDC
+import numpy as np
+from model import *
+from setmask import *
+from triangle import triangle
+from parameterize import parameterize
+from solve import solve
+from socket import gethostname
+from generic import generic
+
+md = triangle(model(), '../Exp/Square.exp', 100000.)
+md = setmask(md, '', '')
+#reduced square (20m long)
+md.mesh.x = md.mesh.x / 5.0e4
+md.mesh.y = md.mesh.y / 5.0e4
+md = parameterize(md, '../Par/SquareNoDyn.py')
+md.cluster = generic('name', gethostname(), 'np', 1)
+
+md.transient.ishydrology = True
+md.hydrology = hydrologydc()
+md.hydrology = md.hydrology.initialize(md)
+
+
+#Hydro Model Parameters
+md.hydrology.isefficientlayer = 0
+md.hydrology.sedimentlimit_flag = 0
+md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
+md.hydrology.rel_tol = 1.0e-6
+md.hydrology.penalty_lock = 0
+md.hydrology.max_iter = 200
+md.hydrology.transfer_flag = 0
+md.hydrology.unconfined_flag = 1
+#Sediment
+md.hydrology.sediment_porosity = 0.1
+md.hydrology.sediment_thickness = 10.0
+md.hydrology.sediment_transmitivity = (1.0e-3 * md.hydrology.sediment_thickness) * np.ones((md.mesh.numberofvertices))
+#init
+md.initialization.sediment_head = -5.0 * np.ones((md.mesh.numberofvertices))
+#BC
+md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
+md.hydrology.spcsediment_head[np.where(md.mesh.x == 0)] = 0.5
+
+md.timestepping.time_step = 5 / md.constants.yts  #5s stepping
+md.settings.output_frequency = 2
+md.timestepping.final_time = 300 / md.constants.yts  #300s run
+
+md = solve(md, 'Transient')
+
+#fields to track, results can also be found in
+#Wang 2009 Fig 6b (journal of Hydrology)
+field_names = ['SedimentWaterHead1', 'SedimentWaterHead2']
+field_tolerances = [1e-13, 1e-13]
+field_values = [md.results.TransientSolution[10].SedimentHead, md.results.TransientSolution[30].SedimentHead]
Index: /issm/trunk/test/NightlyRun/test901.m
===================================================================
--- /issm/trunk/test/NightlyRun/test901.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test901.m	(revision 24686)
@@ -0,0 +1,33 @@
+%Test Name: SquareNoDynHydrologyDCOneLayer
+md=triangle(model(),'../Exp/Square.exp',100000.);
+md=setmask(md,'','');
+md=parameterize(md,'../Par/SquareNoDyn.par');
+md.cluster=generic('name',oshostname(),'np',1);
+
+md.transient.ishydrology=1;
+md.hydrology=(hydrologydc);
+md.hydrology=initialize(md.hydrology,md);
+
+md.hydrology.isefficientlayer=0;
+md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
+md.hydrology.sedimentlimit_flag=1;
+md.hydrology.sedimentlimit=8000.0;
+
+md.initialization.sediment_head=0.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
+pos=find(md.mesh.y==0);
+md.hydrology.spcsediment_head(pos)=0.0;
+md.basalforcings.groundedice_melting_rate = 2.0*ones(md.mesh.numberofvertices,1);
+md.basalforcings.floatingice_melting_rate = 0.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.sediment_transmitivity=3*ones(md.mesh.numberofvertices,1);
+md.timestepping.time_step=0;
+md.timestepping.final_time=1.0;
+md=solve(md,'Hydrology');
+
+%Fields and tolerances to track changes
+%you can also compare with an analytic solution, but it is exact
+%only if no limits are applied
+%analytic=(md.mesh.y.^2-2*md.mesh.y*1.0e6)*(-2.0/(2*md.constants.yts*md.hydrology.sediment_transmitivity))
+field_names     ={'SedimentWaterHead','SedimentHeadResidual'};
+field_tolerances={1e-13, 3e-10};
+field_values={md.results.HydrologySolution.SedimentHead,md.results.HydrologySolution.SedimentHeadResidual};
Index: /issm/trunk/test/NightlyRun/test901.py
===================================================================
--- /issm/trunk/test/NightlyRun/test901.py	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test901.py	(revision 24686)
@@ -0,0 +1,42 @@
+#Test Name: SquareNoDynHydrologyDCOneLayer
+import numpy as np
+from model import *
+from socket import gethostname
+from triangle import triangle
+from setmask import setmask
+from parameterize import parameterize
+from solve import solve
+from generic import generic
+
+md = triangle(model(), '../Exp/Square.exp', 100000.)
+md = setmask(md, '', '')
+md = parameterize(md, '../Par/SquareNoDyn.py')
+md.cluster = generic('name', gethostname(), 'np', 1)
+
+md.transient.ishydrology = True
+md.hydrology = hydrologydc()
+md.hydrology = md.hydrology.initialize(md)
+
+md.hydrology.isefficientlayer = 0
+md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
+md.hydrology.sedimentlimit_flag = 1
+md.hydrology.sedimentlimit = 8000.0
+
+md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
+md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
+pos = np.nonzero(md.mesh.y == 0.)[0]
+md.hydrology.spcsediment_head[pos] = 0.0
+md.basalforcings.groundedice_melting_rate = 2.0 * np.ones((md.mesh.numberofvertices))
+md.basalforcings.floatingice_melting_rate = 0.0 * np.ones((md.mesh.numberofvertices))
+md.hydrology.sediment_transmitivity = 3.0 * np.ones((md.mesh.numberofvertices))
+md.timestepping.time_step = 0
+md.timestepping.final_time = 1.0
+md = solve(md, 'Hydrology')
+
+#Fields and tolerances to track changes
+#you can also compare with an analytic solution, but it is exact
+#only if no limits are applied
+#analytic=(md.mesh.y**2 - 2 * md.mesh.y * 1.0e6) * (-2.0 / (2 * md.constants.yts * md.hydrology.sediment_transmitivity))
+field_names = ['SedimentWaterHead', 'SedimentHeadResidual']
+field_tolerances = [1e-13, 3e-10]
+field_values = [md.results.HydrologySolution.SedimentHead, md.results.HydrologySolution.SedimentHeadResidual]
Index: /issm/trunk/test/NightlyRun/test902.m
===================================================================
--- /issm/trunk/test/NightlyRun/test902.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test902.m	(revision 24686)
@@ -0,0 +1,75 @@
+%Test Name: SquareSheetHydrologyDCTwoLayers
+md=triangle(model(),'../Exp/Square.exp',100000.);
+md=setmask(md,'','');
+md=parameterize(md,'../Par/SquareNoDyn.par');
+md.cluster=generic('name',oshostname(),'np',1);
+
+md.transient.ishydrology=1;
+md.hydrology=(hydrologydc);
+md.hydrology=initialize(md.hydrology,md);
+
+md.hydrology.isefficientlayer=1;
+md.hydrology.sedimentlimit_flag=1;
+md.hydrology.sedimentlimit=800.0;
+md.hydrology.transfer_flag = 0;
+md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
+md.initialization.sediment_head=0.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
+
+md.basalforcings.groundedice_melting_rate = 2.0*ones(md.mesh.numberofvertices,1);
+md.basalforcings.floatingice_melting_rate = 0.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.sediment_transmitivity=3*ones(md.mesh.numberofvertices,1);
+
+md.initialization.epl_head=0.0*ones(md.mesh.numberofvertices,1);
+md.initialization.epl_thickness=1.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.spcepl_head=NaN*ones(md.mesh.numberofvertices,1);
+md.hydrology.mask_eplactive_node=0*ones(md.mesh.numberofvertices,1);
+md.hydrology.epl_conductivity=30;
+md.hydrology.epl_initial_thickness=1;
+md.hydrology.epl_colapse_thickness=1.0e-3;
+md.hydrology.epl_thick_comp=1;
+md.hydrology.epl_max_thickness=1;
+md.hydrology.steps_per_step=10;
+md.timestepping.time_step=2.0;
+md.timestepping.final_time=2.0;
+
+md=solve(md,'Transient');
+
+%re-run with no substeps
+mdfine=md;
+mdfine.results=struct();
+mdfine.hydrology.steps_per_step=1;
+mdfine.timestepping.time_step=0.2;
+mdfine=solve(mdfine,'Transient');
+
+%Fields and tolerances to track changes
+field_names     ={'SedimentWaterHead1','EplWaterHead1','SedimentHeadResidual1',...
+                  'SedimentWaterHead4','EplWaterHead4','SedimentHeadResidual4',...
+                  'SedimentWaterHead5','EplWaterHead5','SedimentHeadResidual5',...
+                  'SedimentWaterHead9','EplWaterHead9','SedimentHeadResidual9',...
+                  'EplWaterHead10', 'EplWaterHeadSubstep10', 'SedimentWaterHead10',...
+		  'SedimentWaterHeadSubstep10'};
+field_tolerances={...
+    1e-13, 1e-13, 1e-13,...
+    1e-13, 1e-13, 1e-13,...
+    1e-13, 5e-12, 1e-11,...
+    1e-13, 5e-12, 1e-11,...
+    1e-13, 1e-13, 1e-13,...
+    1e-13};
+field_values={mdfine.results.TransientSolution(1).SedimentHead, ...
+	      mdfine.results.TransientSolution(1).EplHead,...
+	      mdfine.results.TransientSolution(1).SedimentHeadResidual,...
+	      mdfine.results.TransientSolution(4).SedimentHead,...
+	      mdfine.results.TransientSolution(4).EplHead,...
+	      mdfine.results.TransientSolution(4).SedimentHeadResidual, ...
+	      mdfine.results.TransientSolution(5).SedimentHead,...
+	      mdfine.results.TransientSolution(5).EplHead,...
+	      mdfine.results.TransientSolution(5).SedimentHeadResidual, ...
+	      mdfine.results.TransientSolution(9).SedimentHead,...
+	      mdfine.results.TransientSolution(9).EplHead,...
+	      mdfine.results.TransientSolution(9).SedimentHeadResidual,...
+              md.results.TransientSolution(1).EplHead,...
+              md.results.TransientSolution(1).EplHeadSubstep,...
+              md.results.TransientSolution(1).SedimentHead,...
+              md.results.TransientSolution(1).SedimentHeadSubstep
+              };
Index: /issm/trunk/test/NightlyRun/test902.py
===================================================================
--- /issm/trunk/test/NightlyRun/test902.py	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test902.py	(revision 24686)
@@ -0,0 +1,81 @@
+#Test Name: SquareNoDynHydrologyDCTwoLayers
+import numpy as np
+from model import *
+from socket import gethostname
+from triangle import triangle
+from setmask import setmask
+from parameterize import parameterize
+from solve import solve
+from generic import generic
+
+md = triangle(model(), '../Exp/Square.exp', 100000.)
+md = setmask(md, '', '')
+md = parameterize(md, '../Par/SquareNoDyn.py')
+md.cluster = generic('name', gethostname(), 'np', 1)
+
+md.transient.ishydrology = True
+md.hydrology = hydrologydc()
+md.hydrology = md.hydrology.initialize(md)
+
+md.hydrology.isefficientlayer = 1
+md.hydrology.sedimentlimit_flag = 1
+md.hydrology.sedimentlimit = 800.0
+md.hydrology.transfer_flag = 0
+md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
+md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
+md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
+
+md.basalforcings.groundedice_melting_rate = 2.0 * np.ones((md.mesh.numberofvertices))
+md.basalforcings.floatingice_melting_rate = 0.0 * np.ones((md.mesh.numberofvertices))
+md.hydrology.sediment_transmitivity = 3.0 * np.ones((md.mesh.numberofvertices))
+
+md.initialization.epl_head = np.zeros((md.mesh.numberofvertices))
+md.initialization.epl_thickness = np.ones((md.mesh.numberofvertices))
+md.hydrology.spcepl_head = np.nan * np.ones((md.mesh.numberofvertices))
+md.hydrology.mask_eplactive_node = np.zeros((md.mesh.numberofvertices))
+md.hydrology.epl_conductivity = 30
+md.hydrology.epl_initial_thickness = 1
+md.hydrology.epl_colapse_thickness = 1.0e-3
+md.hydrology.epl_thick_comp = 1
+md.hydrology.epl_max_thickness = 1
+md.hydrology.steps_per_step = 10
+md.timestepping.time_step = 2.0
+md.timestepping.final_time = 2.0
+
+md = solve(md, 'Transient')
+
+#re-run with no substeps
+mdfine = copy.deepcopy(md)
+mdfine.results = []
+mdfine.hydrology.steps_per_step = 1
+mdfine.timestepping.time_step = 0.2
+mdfine = solve(mdfine, 'Transient')
+
+field_names = ['SedimentWaterHead1', 'EplWaterHead1', 'SedimentHeadResidual1',
+               'SedimentWaterHead4', 'EplWaterHead4', 'SedimentHeadResidual4',
+               'SedimentWaterHead5', 'EplWaterHead5', 'SedimentHeadResidual5',
+               'SedimentWaterHead9', 'EplWaterHead9', 'SedimentHeadResidual9',
+               'EplWaterHead10', 'EplWaterHeadSubstep10', 'SedimentWaterHead10',
+               'SedimentWaterHeadSubstep10']
+field_tolerances = [1e-13, 1e-13, 1e-13,
+                    1e-13, 1e-13, 1e-13,
+                    1e-13, 5e-12, 1e-11,
+                    1e-13, 5e-12, 1e-11,
+                    1e-13, 1e-13, 1e-13,
+                    1e-13]
+field_values = [mdfine.results.TransientSolution[0].SedimentHead,
+                mdfine.results.TransientSolution[0].EplHead,
+                mdfine.results.TransientSolution[0].SedimentHeadResidual,
+                mdfine.results.TransientSolution[3].SedimentHead,
+                mdfine.results.TransientSolution[3].EplHead,
+                mdfine.results.TransientSolution[3].SedimentHeadResidual,
+                mdfine.results.TransientSolution[4].SedimentHead,
+                mdfine.results.TransientSolution[4].EplHead,
+                mdfine.results.TransientSolution[4].SedimentHeadResidual,
+                mdfine.results.TransientSolution[8].SedimentHead,
+                mdfine.results.TransientSolution[8].EplHead,
+                mdfine.results.TransientSolution[8].SedimentHeadResidual,
+                md.results.TransientSolution[-1].EplHead,
+                md.results.TransientSolution[-1].EplHeadSubstep,
+                md.results.TransientSolution[-1].SedimentHead,
+                md.results.TransientSolution[-1].SedimentHeadSubstep]
Index: /issm/trunk/test/NightlyRun/test903.m
===================================================================
--- /issm/trunk/test/NightlyRun/test903.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test903.m	(revision 24686)
@@ -0,0 +1,35 @@
+%Test Name: SquareNoDynExtrudedHydrologyDCOneLayer
+md=triangle(model(),'../Exp/Square.exp',100000.);
+md=setmask(md,'','');
+md=parameterize(md,'../Par/SquareNoDyn.par');
+md.cluster=generic('name',oshostname(),'np',1);
+
+md.transient.ishydrology=1;
+md.hydrology=(hydrologydc);
+md.hydrology=initialize(md.hydrology,md);
+
+md.hydrology.isefficientlayer=0;
+md.hydrology.sedimentlimit_flag=1;
+md.hydrology.sedimentlimit=8000.0;
+md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
+md.initialization.sediment_head=0.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
+pos=find(md.mesh.y==0);
+md.hydrology.spcsediment_head(pos)=0.0;
+
+md.basalforcings.groundedice_melting_rate = 2.0*ones(md.mesh.numberofvertices,1);
+md.basalforcings.floatingice_melting_rate = 0.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.sediment_transmitivity= 3.0*ones(md.mesh.numberofvertices,1);
+
+md.timestepping.time_step=0;
+md.timestepping.final_time=1.0;
+md=extrude(md,3,1.1);
+md=solve(md,'Hydrology');
+
+%Fields and tolerances to track changes
+%you can also compare with an analytic solution, but it is exact
+%only if no limits are applied
+%analytic=(md.mesh.y.^2-2*md.mesh.y*1.0e6)*(-2.0/(2*md.constants.yts*md.hydrology.sediment_transmitivity))
+field_names     ={'SedimentWaterHead','SedimentHeadResidual'};
+field_tolerances={1e-13, 3e-10};
+field_values={md.results.HydrologySolution.SedimentHead,md.results.HydrologySolution.SedimentHeadResidual};
Index: /issm/trunk/test/NightlyRun/test903.py
===================================================================
--- /issm/trunk/test/NightlyRun/test903.py	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test903.py	(revision 24686)
@@ -0,0 +1,43 @@
+#Test Name: SquareNoDynExtrudedHydrologyDCOneLayer
+import numpy as np
+from model import *
+from socket import gethostname
+from triangle import triangle
+from setmask import setmask
+from parameterize import parameterize
+from solve import solve
+from generic import generic
+
+md = triangle(model(), '../Exp/Square.exp', 100000.)
+md = setmask(md, '', '')
+md = parameterize(md, '../Par/SquareNoDyn.py')
+md.cluster = generic('name', gethostname(), 'np', 1)
+
+md.transient.ishydrology = True
+md.hydrology = hydrologydc()
+md.hydrology = md.hydrology.initialize(md)
+
+md.hydrology.isefficientlayer = 0
+md.hydrology.sedimentlimit_flag = 1
+md.hydrology.sedimentlimit = 8000.0
+md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
+md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
+md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
+md.hydrology.spcsediment_head[np.where(md.mesh.y == 0)] = 0.0
+
+md.basalforcings.groundedice_melting_rate = 2.0 * np.ones((md.mesh.numberofvertices))
+md.basalforcings.floatingice_melting_rate = 0.0 * np.ones((md.mesh.numberofvertices))
+md.hydrology.sediment_transmitivity = 3.0 * np.ones((md.mesh.numberofvertices))
+
+md.timestepping.time_step = 0
+md.timestepping.final_time = 1.0
+md.extrude(3, 1.)
+md = solve(md, 'Hydrology')
+
+#Fields and tolerances to track changes
+#you can also compare with an analytic solution, but it is exact
+#only if no limits are applied
+#analytic=(md.mesh.y**2 - 2 * md.mesh.y * 1.0e6) * (-2.0 / (2 * md.constants.yts * md.hydrology.sediment_transmitivity))
+field_names = ['SedimentWaterHead', 'SedimentHeadResidual']
+field_tolerances = [1e-13, 3e-10]
+field_values = [md.results.HydrologySolution.SedimentHead, md.results.HydrologySolution.SedimentHeadResidual]
Index: /issm/trunk/test/NightlyRun/test904.m
===================================================================
--- /issm/trunk/test/NightlyRun/test904.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test904.m	(revision 24686)
@@ -0,0 +1,57 @@
+%Test Name: SquareNoDynExtrudedHydrologyDCTwoLayers
+md=triangle(model(),'../Exp/Square.exp',100000.);
+md=setmask(md,'','');
+md=parameterize(md,'../Par/SquareNoDyn.par');
+md.cluster=generic('name',oshostname(),'np',1);
+
+md.transient.ishydrology=1;
+md.hydrology=(hydrologydc);
+md.hydrology=initialize(md.hydrology,md);
+
+md.hydrology.isefficientlayer=1;
+md.hydrology.sedimentlimit_flag=1;
+md.hydrology.transfer_flag = 0;
+md.hydrology.sedimentlimit=800.0;
+md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
+md.initialization.sediment_head=0.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
+md.basalforcings.groundedice_melting_rate = 2.0*ones(md.mesh.numberofvertices,1);
+md.basalforcings.floatingice_melting_rate = 0.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.sediment_transmitivity=3*ones(md.mesh.numberofvertices,1);
+
+md.initialization.epl_head=0.0*ones(md.mesh.numberofvertices,1);
+md.initialization.epl_thickness=1.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.spcepl_head=NaN*ones(md.mesh.numberofvertices,1);
+md.hydrology.mask_eplactive_node=0*ones(md.mesh.numberofvertices,1);
+md.hydrology.epl_conductivity=30;
+md.hydrology.epl_initial_thickness=1;
+md.hydrology.epl_colapse_thickness=1.0e-3;
+md.hydrology.epl_thick_comp=1;
+md.hydrology.epl_max_thickness=1;
+md.timestepping.time_step=0.2;
+md.timestepping.final_time=2.0;
+
+md=extrude(md,3,1.);
+md=solve(md,'Transient');
+
+%Fields and tolerances to track changes
+field_names     ={'SedimentWaterHead1','EplWaterHead1','SedimentHeadResidual1',...
+		  'SedimentWaterHead4','EplWaterHead4','SedimentHeadResidual4',...
+		  'SedimentWaterHead5','EplWaterHead5','SedimentHeadResidual5',...
+		  'SedimentWaterHead9','EplWaterHead9','SedimentHeadResidual9'};
+field_tolerances={1e-13, 1e-13, 1e-13,...
+		  1e-13, 1e-13, 1e-13,...
+		  1e-13, 5e-12, 2e-11,...
+		  1e-13, 5e-12, 2e-11};
+field_values={md.results.TransientSolution(1).SedimentHead, ...
+	      md.results.TransientSolution(1).EplHead,...
+	      md.results.TransientSolution(1).SedimentHeadResidual,...
+	      md.results.TransientSolution(4).SedimentHead,...
+	      md.results.TransientSolution(4).EplHead,...
+	      md.results.TransientSolution(4).SedimentHeadResidual, ...
+	      md.results.TransientSolution(5).SedimentHead,...
+	      md.results.TransientSolution(5).EplHead,...
+	      md.results.TransientSolution(5).SedimentHeadResidual, ...
+	      md.results.TransientSolution(9).SedimentHead,...
+	      md.results.TransientSolution(9).EplHead,...
+	      md.results.TransientSolution(9).SedimentHeadResidual};
Index: /issm/trunk/test/NightlyRun/test904.py
===================================================================
--- /issm/trunk/test/NightlyRun/test904.py	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test904.py	(revision 24686)
@@ -0,0 +1,66 @@
+#Test Name: SquareNoDynExtrudedHydrologyDCTwoLayers
+import numpy as np
+from model import *
+from socket import gethostname
+from triangle import triangle
+from setmask import setmask
+from parameterize import parameterize
+from solve import solve
+from generic import generic
+
+md = triangle(model(), '../Exp/Square.exp', 100000.)
+md = setmask(md, '', '')
+md = parameterize(md, '../Par/SquareNoDyn.py')
+md.cluster = generic('name', gethostname(), 'np', 1)
+
+md.transient.ishydrology = True
+md.hydrology = hydrologydc()
+md.hydrology = md.hydrology.initialize(md)
+
+md.hydrology.isefficientlayer = 1
+md.hydrology.sedimentlimit_flag = 1
+md.hydrology.transfer_flag = 0
+md.hydrology.sedimentlimit = 800.0
+md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
+md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
+md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
+md.basalforcings.groundedice_melting_rate = 2.0 * np.ones((md.mesh.numberofvertices))
+md.basalforcings.floatingice_melting_rate = np.zeros((md.mesh.numberofvertices))
+md.hydrology.sediment_transmitivity = 3 * np.ones((md.mesh.numberofvertices))
+
+md.initialization.epl_head = np.zeros((md.mesh.numberofvertices))
+md.initialization.epl_thickness = np.ones((md.mesh.numberofvertices))
+md.hydrology.spcepl_head = np.nan * np.ones((md.mesh.numberofvertices))
+md.hydrology.mask_eplactive_node = np.zeros((md.mesh.numberofvertices))
+md.hydrology.epl_conductivity = 30
+md.hydrology.epl_initial_thickness = 1
+md.hydrology.epl_colapse_thickness = 1.0e-3
+md.hydrology.epl_thick_comp = 1
+md.hydrology.epl_max_thickness = 1
+md.timestepping.time_step = 0.2
+md.timestepping.final_time = 2.0
+
+md.extrude(3, 1.)
+md = solve(md, 'Transient')
+
+#Fields and tolerances to track changes
+field_names = ['SedimentWaterHead1', 'EplWaterHead1', 'SedimentHeadResidual1',
+               'SedimentWaterHead4', 'EplWaterHead4', 'SedimentHeadResidual4',
+               'SedimentWaterHead5', 'EplWaterHead5', 'SedimentHeadResidual5',
+               'SedimentWaterHead9', 'EplWaterHead9', 'SedimentHeadResidual9']
+field_tolerances = [1e-13, 1e-13, 1e-13,
+                    1e-13, 1e-13, 1e-13,
+                    1e-13, 5e-12, 2e-11,
+                    1e-13, 5e-12, 2e-11]
+field_values = [md.results.TransientSolution[0].SedimentHead,
+                md.results.TransientSolution[0].EplHead,
+                md.results.TransientSolution[0].SedimentHeadResidual,
+                md.results.TransientSolution[3].SedimentHead,
+                md.results.TransientSolution[3].EplHead,
+                md.results.TransientSolution[3].SedimentHeadResidual,
+                md.results.TransientSolution[4].SedimentHead,
+                md.results.TransientSolution[4].EplHead,
+                md.results.TransientSolution[4].SedimentHeadResidual,
+                md.results.TransientSolution[8].SedimentHead,
+                md.results.TransientSolution[8].EplHead,
+                md.results.TransientSolution[8].SedimentHeadResidual]
Index: /issm/trunk/test/NightlyRun/test905.m
===================================================================
--- /issm/trunk/test/NightlyRun/test905.m	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test905.m	(revision 24686)
@@ -0,0 +1,84 @@
+%Test Name: SquareNoDynHydrologyDCSmbCoupled
+md=triangle(model(),'../Exp/Square.exp',100000.);
+md=setmask(md,'','');
+md=parameterize(md,'../Par/SquareNoDyn.par');
+md.cluster = generic('name',oshostname(),'np',1);
+
+md.transient.ishydrology=1;
+md.transient.issmb=1;
+md.hydrology=(hydrologydc);
+md.hydrology=initialize(md.hydrology,md);
+md.smb=(SMBgradientscomponents);
+
+md.hydrology.isefficientlayer=1;
+
+md.hydrology.sedimentlimit_flag=1;
+md.hydrology.sedimentlimit=400.0;
+md.hydrology.sediment_transmitivity=3.0*ones(md.mesh.numberofvertices,1);
+md.hydrology.mask_thawed_node=ones(md.mesh.numberofvertices,1);
+
+md.hydrology.mask_eplactive_node=zeros(md.mesh.numberofvertices,1);
+md.hydrology.epl_conductivity=3.;
+md.hydrology.epl_initial_thickness=20;
+md.hydrology.epl_colapse_thickness=1.0e-3;
+md.hydrology.epl_thick_comp=0;
+md.hydrology.epl_max_thickness=1;
+
+md.hydrology.spcsediment_head=NaN*ones(md.mesh.numberofvertices,1);
+md.hydrology.spcepl_head=NaN*ones(md.mesh.numberofvertices,1);
+
+md.initialization.sediment_head=zeros(md.mesh.numberofvertices,1);
+md.initialization.epl_head=zeros(md.mesh.numberofvertices,1);
+md.initialization.epl_thickness=ones(md.mesh.numberofvertices,1);
+
+md.hydrology.steps_per_step=5;
+md.smb.steps_per_step=7.;
+md.timestepping.time_step=2.;
+md.timestepping.final_time=20.0;
+
+smb_step=md.timestepping.time_step/md.smb.steps_per_step;
+duration=[md.timestepping.start_time:smb_step:md.timestepping.final_time];
+
+ddf=10.0e-3;
+md.smb.accuref=[[0.5 0.5];[0. 2.0]];
+md.smb.accualti=0.0;
+md.smb.accugrad=0.0;
+
+md.smb.runoffref=0.9*duration*ddf;
+md.smb.runoffref=[md.smb.runoffref;duration];
+md.smb.runoffalti=0.0;
+md.smb.runoffgrad=-6.5e-3*ddf;  %lapse rate *ddf*day per year
+
+md.basalforcings.groundedice_melting_rate=zeros(md.mesh.numberofvertices,1);
+md.basalforcings.floatingice_melting_rate=zeros(md.mesh.numberofvertices,1);
+
+md=solve(md,'Transient');
+
+field_names={'SedimentWaterHead1','EplWaterHead1','SedimentHeadResidual1',...
+	     'SedimentWaterHead4','EplWaterHead4','SedimentHeadResidual4',...
+	     'SedimentWaterHead5','EplWaterHead5','SedimentHeadResidual5',...
+	     'SedimentWaterHead9','EplWaterHead9','SedimentHeadResidual9',...
+	     'EplWaterHead10','EplWaterHeadSubstep10','SedimentWaterHead10',...
+	     'SedimentWaterHeadSubstep10'}
+field_tolerances={1e-13,1e-13,1e-13,...
+		  1e-13,1e-13,1e-13,...
+		  1e-13,5e-12,1e-11,...
+		  1e-13,5e-12,1e-11,...
+		  1e-13,1e-13,1e-13,...
+		  1e-13};
+field_values={md.results.TransientSolution(1).SedimentHead,...
+	      md.results.TransientSolution(1).EplHead,...
+	      md.results.TransientSolution(1).SedimentHeadResidual,...
+	      md.results.TransientSolution(4).SedimentHead,...
+	      md.results.TransientSolution(4).EplHead,...
+	      md.results.TransientSolution(4).SedimentHeadResidual,...
+	      md.results.TransientSolution(5).SedimentHead,...
+	      md.results.TransientSolution(5).EplHead,...
+	      md.results.TransientSolution(5).SedimentHeadResidual,...
+	      md.results.TransientSolution(9).SedimentHead,...
+	      md.results.TransientSolution(9).EplHead,...
+	      md.results.TransientSolution(9).SedimentHeadResidual,...
+	      md.results.TransientSolution(end).EplHead,...
+	      md.results.TransientSolution(end).EplHeadSubstep,...
+	      md.results.TransientSolution(end).SedimentHead,...
+	      md.results.TransientSolution(end).SedimentHeadSubstep};
Index: /issm/trunk/test/NightlyRun/test905.py
===================================================================
--- /issm/trunk/test/NightlyRun/test905.py	(revision 24686)
+++ /issm/trunk/test/NightlyRun/test905.py	(revision 24686)
@@ -0,0 +1,94 @@
+#Test Name: SquareNoDynHydrologyDCSmbCoupled
+import numpy as np
+from model import *
+from socket import gethostname
+from triangle import triangle
+from setmask import setmask
+from parameterize import parameterize
+from solve import solve
+from generic import generic
+from SMBgradientscomponents import SMBgradientscomponents
+
+md = triangle(model(), '../Exp/Square.exp', 100000.)
+md = setmask(md, '', '')
+md = parameterize(md, '../Par/SquareNoDyn.py')
+md.cluster = generic('name', gethostname(), 'np', 1)
+
+md.transient.ishydrology = True
+md.transient.issmb = True
+md.hydrology = hydrologydc()
+md.hydrology = md.hydrology.initialize(md)
+md.smb = SMBgradientscomponents()
+
+md.hydrology.isefficientlayer = 1
+
+md.hydrology.sedimentlimit_flag = 1
+md.hydrology.sedimentlimit = 400.0
+md.hydrology.sediment_transmitivity = 3.0 * np.ones((md.mesh.numberofvertices))
+md.hydrology.mask_thawed_node = np.ones((md.mesh.numberofvertices))
+
+md.hydrology.mask_eplactive_node = np.zeros((md.mesh.numberofvertices))
+md.hydrology.epl_conductivity = 3.
+md.hydrology.epl_initial_thickness = 20
+md.hydrology.epl_colapse_thickness = 1.0e-3
+md.hydrology.epl_thick_comp = 0
+md.hydrology.epl_max_thickness = 1
+
+md.hydrology.spcsediment_head = np.nan * np.ones((md.mesh.numberofvertices))
+md.hydrology.spcepl_head = np.nan * np.ones((md.mesh.numberofvertices))
+
+md.initialization.sediment_head = np.zeros((md.mesh.numberofvertices))
+md.initialization.epl_head = np.zeros((md.mesh.numberofvertices))
+md.initialization.epl_thickness = np.ones((md.mesh.numberofvertices))
+
+md.hydrology.steps_per_step = 5
+md.smb.steps_per_step = 7  #md.hydrology.steps_per_step
+md.timestepping.time_step = 2.
+md.timestepping.final_time = 20.0
+
+smb_step = md.timestepping.time_step / md.smb.steps_per_step
+duration = np.arange(md.timestepping.start_time, md.timestepping.final_time + smb_step, smb_step)
+
+ddf = 10.0e-3
+md.smb.accuref = np.array([[0.5, 0.5], [0., 2.0]])
+md.smb.accualti = 0.0
+md.smb.accugrad = 0.0
+
+md.smb.runoffref = 0.9 * duration * ddf
+md.smb.runoffref = np.vstack((md.smb.runoffref, duration))
+md.smb.runoffalti = 0.0
+md.smb.runoffgrad = -6.5e-3 * ddf  #lapse rate *ddf*day per year
+
+md.basalforcings.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices))
+md.basalforcings.floatingice_melting_rate = np.zeros((md.mesh.numberofvertices))
+
+md = solve(md, 'Transient')
+
+field_names = ['SedimentWaterHead1', 'EplWaterHead1', 'SedimentHeadResidual1',
+               'SedimentWaterHead4', 'EplWaterHead4', 'SedimentHeadResidual4',
+               'SedimentWaterHead5', 'EplWaterHead5', 'SedimentHeadResidual5',
+               'SedimentWaterHead9', 'EplWaterHead9', 'SedimentHeadResidual9',
+               'EplWaterHead10', 'EplWaterHeadSubstep10', 'SedimentWaterHead10',
+               'SedimentWaterHeadSubstep10']
+field_tolerances = [1e-13, 1e-13, 1e-13,
+                    1e-13, 1e-13, 1e-13,
+                    1e-13, 5e-12, 1e-11,
+                    1e-13, 5e-12, 1e-11,
+                    1e-13, 1e-13, 1e-13,
+                    1e-13]
+field_values = [md.results.TransientSolution[0].SedimentHead,
+                md.results.TransientSolution[0].EplHead,
+                md.results.TransientSolution[0].SedimentHeadResidual,
+                md.results.TransientSolution[3].SedimentHead,
+                md.results.TransientSolution[3].EplHead,
+                md.results.TransientSolution[3].SedimentHeadResidual,
+                md.results.TransientSolution[4].SedimentHead,
+                md.results.TransientSolution[4].EplHead,
+                md.results.TransientSolution[4].SedimentHeadResidual,
+                md.results.TransientSolution[8].SedimentHead,
+                md.results.TransientSolution[8].EplHead,
+                md.results.TransientSolution[8].SedimentHeadResidual,
+                md.results.TransientSolution[-1].EplHead,
+                md.results.TransientSolution[-1].EplHeadSubstep,
+                md.results.TransientSolution[-1].SedimentHead,
+                md.results.TransientSolution[-1].SedimentHeadSubstep]
Index: sm/trunk/test/NightlyRun/yellowstone.lsf
===================================================================
--- /issm/trunk/test/NightlyRun/yellowstone.lsf	(revision 24685)
+++ 	(revision )
@@ -1,36 +1,0 @@
-!/bin/tcsh
-#BSUB -P P93300301
-#BSUB -W 12:0
-#BSUB -n 1
-#BSUB -J test101
-#BSUB -o /glade/u/home/elarour/trunk-jpl/exeuction/test101-03-06-2017-16-12-56-77385/test101.out 
-#BSUB -o /glade/u/home/elarour/trunk-jpl/exeuction/test101-03-06-2017-16-12-56-77385/test101.err 
-#BSUB -q premium
-module load ncarenv/1.0
-module load ncarbinlibs/1.1
-module load perlmods/5.0
-module load gmake/4.1
-module load python/2.7.7
-module load all-python-libs
-module load git/2.3.0
-module load intel/15.0.3
-module load mkl/11.1.2
-module load esmf
-module load esmf-6.3.0rp1-defio-mpi-O
-module load netcdf-mpi/4.3.3.1
-module load pnetcdf/1.6.1
-module load ncarcompilers/1.0
-module load cmake/3.0.2
-module load matlab/R2015b
-module load issm
-export OMP_STACKSIZE=256M
-export MP_LABELIO=yes
-export MP_INFOLEVEL=2
-export MP_SHARED_MEMORY=yes
-export MP_EUILIB=us
-export MP_STDOUTMODE=unordered
-export MP_RC_USE_LMC=yes
-export MP_MPILIB=mpich2
-cd /glade/u/home/elarour/trunk-jpl/exeuction/test101-03-06-2017-16-12-56-77385/
-
-mpirun.lsf -np 1 /glade/u/home/elarour/trunk-jpl/build-fw/bin/issm.exe StressbalanceSolution /glade/u/home/elarour/trunk-jpl/exeuction/test101-03-06-2017-16-12-56-77385 test101
Index: sm/trunk/test/NightlyRun/yellowstone.m
===================================================================
--- /issm/trunk/test/NightlyRun/yellowstone.m	(revision 24685)
+++ 	(revision )
@@ -1,46 +1,0 @@
-%Test Name: SquareShelfConstrainedStressSSA2d
-md=triangle(model(),'../Exp/Square.exp',50000.);
-md=setmask(md,'all','');
-md=parameterize(md,'../Par/SquareShelfConstrained.par');
-md=setflowequation(md,'SSA','all');
-
-%md.cluster=generic('name',oshostname(),'np',2);
-md.cluster=yellowstone;
-md.cluster.login='elarour';
-md.cluster.codepath='/glade/u/home/elarour/trunk-jpl/build-fw/bin';
-md.cluster.executionpath='/glade/u/home/elarour/trunk-jpl/execution';
-
-%output
-md.stressbalance.requested_outputs={'default','DeviatoricStressxx','DeviatoricStressyy','DeviatoricStressxy','MassFlux1','MassFlux2','MassFlux3','MassFlux4','MassFlux5','MassFlux6'};
-md.outputdefinition.definitions={...
-	massfluxatgate('name','MassFlux1','profilename',['../Exp/MassFlux1.exp'],'definitionstring','Outputdefinition1'),...
-	massfluxatgate('name','MassFlux2','profilename',['../Exp/MassFlux2.exp'],'definitionstring','Outputdefinition2'),...
-	massfluxatgate('name','MassFlux3','profilename',['../Exp/MassFlux3.exp'],'definitionstring','Outputdefinition3'),...
-	massfluxatgate('name','MassFlux4','profilename',['../Exp/MassFlux4.exp'],'definitionstring','Outputdefinition4'),...
-	massfluxatgate('name','MassFlux5','profilename',['../Exp/MassFlux5.exp'],'definitionstring','Outputdefinition5'),...
-	massfluxatgate('name','MassFlux6','profilename',['../Exp/MassFlux6.exp'],'definitionstring','Outputdefinition6')...
-	};
-
-md=solve(md,'Stressbalance');
-
-%Fields and tolerances to track changes
-field_names     ={'Vx','Vy','Vel','Pressure',...
-	'DeviatoricStressxx','DeviatoricStressyy','DeviatoricStressxy','MassFlux1','MassFlux2','MassFlux3','MassFlux4','MassFlux5','MassFlux6'};
-field_tolerances={2e-13,1e-13,1e-13,1e-13,...
-	1e-13,1e-13,1e-13,...
-	1e-13, 1e-13, 1e-13, 1e-13, 1e-13, 1e-13 };
-field_values={...
-	(md.results.StressbalanceSolution.Vx),...
-	(md.results.StressbalanceSolution.Vy),...
-	(md.results.StressbalanceSolution.Vel),...
-	(md.results.StressbalanceSolution.Pressure),...
-	(md.results.StressbalanceSolution.DeviatoricStressxx),...
-	(md.results.StressbalanceSolution.DeviatoricStressyy),...
-	(md.results.StressbalanceSolution.DeviatoricStressxy),...
-	(md.results.StressbalanceSolution.MassFlux1),...
-	(md.results.StressbalanceSolution.MassFlux2),...
-	(md.results.StressbalanceSolution.MassFlux3),...
-	(md.results.StressbalanceSolution.MassFlux4),...
-	(md.results.StressbalanceSolution.MassFlux5),...
-	(md.results.StressbalanceSolution.MassFlux6)...
-	};
Index: /issm/trunk/test/Par/SquareNoDyn.par
===================================================================
--- /issm/trunk/test/Par/SquareNoDyn.par	(revision 24686)
+++ /issm/trunk/test/Par/SquareNoDyn.par	(revision 24686)
@@ -0,0 +1,47 @@
+%Start defining model parameters here
+
+%Geometry
+md.geometry.thickness=1000.0*ones(md.mesh.numberofvertices,1);
+md.geometry.base=zeros(md.mesh.numberofvertices,1);
+md.geometry.surface=md.geometry.base+md.geometry.thickness;
+
+%Materials
+md.initialization.temperature=(273.-20.)*ones(md.mesh.numberofvertices,1);
+md.materials.rheology_B=paterson(md.initialization.temperature);
+md.materials.rheology_n=3.*ones(md.mesh.numberofelements,1);
+
+%Friction
+md.friction.coefficient=20.*ones(md.mesh.numberofvertices,1);
+md.friction.coefficient(find(md.mask.groundedice_levelset<0.))=0.;
+md.friction.p=ones(md.mesh.numberofelements,1);
+md.friction.q=ones(md.mesh.numberofelements,1);
+
+%Some necessary fields to fool checkconsistency
+md.initialization.vx=zeros(md.mesh.numberofvertices,1);
+md.initialization.vy=zeros(md.mesh.numberofvertices,1);
+md.initialization.vz=zeros(md.mesh.numberofvertices,1);
+md.initialization.pressure=zeros(md.mesh.numberofvertices,1);
+
+md.stressbalance.spcvx=zeros(md.mesh.numberofvertices,1);
+md.stressbalance.spcvy=zeros(md.mesh.numberofvertices,1);
+md.stressbalance.spcvz=zeros(md.mesh.numberofvertices,1);
+
+md.stressbalance.referential=NaN(md.mesh.numberofvertices,6);
+md.stressbalance.loadingforce=zeros(md.mesh.numberofvertices,3);
+
+md.smb.mass_balance=zeros(md.mesh.numberofvertices,1);
+
+md.basalforcings.groundedice_melting_rate=zeros(md.mesh.numberofvertices,1);
+md.basalforcings.floatingice_melting_rate=zeros(md.mesh.numberofvertices,1);
+
+
+%Numerical parameters
+md.verbose=verbose(0);
+md.settings.waitonlock=30;
+md.groundingline.migration='None';
+
+md.transient=deactivateall(md.transient);
+
+%Change name so that no two tests have the same name
+A=dbstack;
+if (length(A)>2), md.miscellaneous.name=A(3).file(1:end-2); end
Index: /issm/trunk/test/Par/SquareNoDyn.py
===================================================================
--- /issm/trunk/test/Par/SquareNoDyn.py	(revision 24686)
+++ /issm/trunk/test/Par/SquareNoDyn.py	(revision 24686)
@@ -0,0 +1,54 @@
+import os.path
+import numpy as np
+import inspect
+from verbose import verbose
+from transient import transient
+from paterson import paterson
+from arch import *
+
+#Start defining model parameters here
+
+#Geometry
+md.geometry.thickness = 1000.0 * np.ones((md.mesh.numberofvertices))
+md.geometry.base = np.zeros((md.mesh.numberofvertices))
+md.geometry.surface = md.geometry.base + md.geometry.thickness
+
+# #Materials
+md.initialization.temperature = (273. - 20.) * np.ones((md.mesh.numberofvertices))
+md.materials.rheology_B = paterson(md.initialization.temperature)
+md.materials.rheology_n = 3. * np.ones((md.mesh.numberofelements))
+
+#Friction
+md.friction.coefficient = 20. * np.ones((md.mesh.numberofvertices))
+md.friction.coefficient[np.where(md.mask.groundedice_levelset < 0.)[0]] = 0.
+md.friction.p = np.ones((md.mesh.numberofelements))
+md.friction.q = np.ones((md.mesh.numberofelements))
+
+#Some necessary fields to fool checkconsistency
+md.initialization.vx = np.zeros((md.mesh.numberofvertices))
+md.initialization.vy = np.zeros((md.mesh.numberofvertices))
+md.initialization.vz = np.zeros((md.mesh.numberofvertices))
+md.initialization.pressure = np.zeros((md.mesh.numberofvertices))
+
+md.stressbalance.spcvx = np.zeros((md.mesh.numberofvertices))
+md.stressbalance.spcvy = np.zeros((md.mesh.numberofvertices))
+md.stressbalance.spcvz = np.zeros((md.mesh.numberofvertices))
+
+md.stressbalance.referential = float('nan') * np.ones((md.mesh.numberofvertices, 6))
+md.stressbalance.loadingforce = np.zeros((md.mesh.numberofvertices, 3))
+
+md.smb.mass_balance = np.zeros((md.mesh.numberofvertices))
+
+md.basalforcings.groundedice_melting_rate = np.zeros((md.mesh.numberofvertices))
+md.basalforcings.floatingice_melting_rate = np.zeros((md.mesh.numberofvertices))
+
+#Numerical parameters
+md.verbose = verbose(0)
+md.settings.waitonlock = 30
+md.groundingline.migration = 'None'
+
+md.transient = transient.setallnullparameters(md.transient)
+
+#Change name so that no two tests have the same name
+if len(inspect.stack()) > 2:
+    md.miscellaneous.name = os.path.basename(inspect.stack()[2][1]).split('.')[0]
